/*
 *  linux/fs/ext3/inode.c
 *
 * Copyright (C) 1992, 1993, 1994, 1995
 * Remy Card (card@masi.ibp.fr)
 * Laboratoire MASI - Institut Blaise Pascal
 * Universite Pierre et Marie Curie (Paris VI)
 *
 *  from
 *
 *  linux/fs/minix/inode.c
 *
 *  Copyright (C) 1991, 1992  Linus Torvalds
 *
 *  Goal-directed block allocation by Stephen Tweedie
 *	(sct@redhat.com), 1993, 1998
 *  Big-endian to little-endian byte-swapping/bitmaps by
 *	David S. Miller (davem@caip.rutgers.edu), 1995
 *  64-bit file support on 64-bit platforms by Jakub Jelinek
 *	(jj@sunsite.ms.mff.cuni.cz)
 *
 *  Assorted race fixes, rewrite of ext3_get_block() by Al Viro, 2000
 */

#include <linux/fs.h>
#include <linux/sched.h>
#include <linux/ext3_jbd.h>
#include <linux/jbd.h>
#include <linux/locks.h>
#include <linux/smp_lock.h>
#include <linux/highuid.h>
#include <linux/quotaops.h>
#include <linux/module.h>

/*
 * SEARCH_FROM_ZERO forces each block allocation to search from the start
 * of the filesystem.  This is to force rapid reallocation of recently-freed
 * blocks.  The file fragmentation is horrendous.
 */
#undef SEARCH_FROM_ZERO

/*
 * The ext3 forget function must perform a revoke if we are freeing data
 * which has been journaled.  Metadata (e.g. indirect blocks) must be
 * revoked in all cases.
 *
 * "bh" may be NULL: a metadata block may have been freed from memory
 * but there may still be a record of it in the journal, and that record
 * still needs to be revoked.
 */
static int ext3_forget(handle_t *handle, int is_metadata,
		       struct inode *inode, struct buffer_head *bh,
		       int blocknr)
{
	int err;

	BUFFER_TRACE(bh, "enter");

	jbd_debug(4, "forgetting bh %p: is_metadata = %d, mode %o, "
		  "data mode %lx\n",
		  bh, is_metadata, inode->i_mode,
		  test_opt(inode->i_sb, DATA_FLAGS));

	/* Never use the revoke function if we are doing full data
	 * journaling: there is no need to, and a V1 superblock won't
	 * support it.  Otherwise, only skip the revoke on un-journaled
	 * data blocks. */

	if (test_opt(inode->i_sb, DATA_FLAGS) == EXT3_MOUNT_JOURNAL_DATA ||
	    (!is_metadata && !ext3_should_journal_data(inode))) {
		if (bh) {
			BUFFER_TRACE(bh, "call journal_forget");
			ext3_journal_forget(handle, bh);
		}
		return 0;
	}

	/*
	 * data!=journal && (is_metadata || should_journal_data(inode))
	 */
	BUFFER_TRACE(bh, "call ext3_journal_revoke");
	err = ext3_journal_revoke(handle, blocknr, bh);
	if (err)
		ext3_abort(inode->i_sb, __FUNCTION__,
			   "error %d when attempting revoke", err);
	BUFFER_TRACE(bh, "exit");
	return err;
}
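/*
 * A summary of the decision above, for reference (not normative - the
 * tests in ext3_forget() are authoritative):
 *
 *	data=journal mount:			always journal_forget()
 *	data=ordered/writeback, data block:	journal_forget(), unless
 *		the inode itself carries the journal-data flag
 *	data=ordered/writeback, metadata:	journal_revoke()
 */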
/*
 * Truncate transactions can be complex and absolutely huge.  So we need to
 * be able to restart the transaction at a convenient checkpoint to make
 * sure we don't overflow the journal.
 *
 * start_transaction gets us a new handle for a truncate transaction,
 * and extend_transaction tries to extend the existing one a bit.  If
 * extend fails, we need to propagate the failure up and restart the
 * transaction in the top-level truncate loop. --sct
 */
static handle_t *start_transaction(struct inode *inode)
{
	long needed;
	handle_t *result;

	needed = inode->i_blocks;
	if (needed > EXT3_MAX_TRANS_DATA)
		needed = EXT3_MAX_TRANS_DATA;

	result = ext3_journal_start(inode, EXT3_DATA_TRANS_BLOCKS + needed);
	if (!IS_ERR(result))
		return result;

	ext3_std_error(inode->i_sb, PTR_ERR(result));
	return result;
}

/*
 * Try to extend this transaction for the purposes of truncation.
 *
 * Returns 0 if we managed to create more room.  If we can't create more
 * room, the transaction must be restarted, and we return 1.
 */
static int try_to_extend_transaction(handle_t *handle, struct inode *inode)
{
	long needed;

	if (handle->h_buffer_credits > EXT3_RESERVE_TRANS_BLOCKS)
		return 0;
	needed = inode->i_blocks;
	if (needed > EXT3_MAX_TRANS_DATA)
		needed = EXT3_MAX_TRANS_DATA;
	if (!ext3_journal_extend(handle, EXT3_RESERVE_TRANS_BLOCKS + needed))
		return 0;
	return 1;
}

/*
 * Restart the transaction associated with *handle.  This does a commit,
 * so before we call here everything must be consistently dirtied against
 * this transaction.
 */
static int ext3_journal_test_restart(handle_t *handle, struct inode *inode)
{
	long needed = inode->i_blocks;
	if (needed > EXT3_MAX_TRANS_DATA)
		needed = EXT3_MAX_TRANS_DATA;
	jbd_debug(2, "restarting handle %p\n", handle);
	return ext3_journal_restart(handle, EXT3_DATA_TRANS_BLOCKS + needed);
}
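/*
 * A sketch of the intended calling pattern (the real loops live in
 * ext3_clear_blocks() and ext3_free_branches() below):
 *
 *	if (try_to_extend_transaction(handle, inode)) {
 *		ext3_mark_inode_dirty(handle, inode);
 *		ext3_journal_test_restart(handle, inode);
 *	}
 *
 * Everything must be consistently dirtied before the restart, because
 * the restart commits the current transaction.
 */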
/*
 * Called at each iput()
 */
void ext3_put_inode (struct inode * inode)
{
	ext3_discard_prealloc (inode);
}

/*
 * Called at the last iput() if i_nlink is zero.
 */
void ext3_delete_inode (struct inode * inode)
{
	handle_t *handle;

	if (is_bad_inode(inode) ||
	    inode->i_ino == EXT3_ACL_IDX_INO ||
	    inode->i_ino == EXT3_ACL_DATA_INO)
		goto no_delete;

	lock_kernel();
	handle = start_transaction(inode);
	if (IS_ERR(handle)) {
		/* If we're going to skip the normal cleanup, we still
		 * need to make sure that the in-core orphan linked list
		 * is properly cleaned up. */
		ext3_orphan_del(NULL, inode);

		ext3_std_error(inode->i_sb, PTR_ERR(handle));
		unlock_kernel();
		goto no_delete;
	}

	if (IS_SYNC(inode))
		handle->h_sync = 1;
	inode->i_size = 0;
	if (inode->i_blocks)
		ext3_truncate(inode);
	/*
	 * Kill off the orphan record which ext3_truncate created.
	 * AKPM: I think this can be inside the above `if'.
	 * Note that ext3_orphan_del() has to be able to cope with the
	 * deletion of a non-existent orphan - this is because we don't
	 * know if ext3_truncate() actually created an orphan record.
	 * (Well, we could do this if we need to, but heck - it works)
	 */
	ext3_orphan_del(handle, inode);
	inode->u.ext3_i.i_dtime = CURRENT_TIME;

	/*
	 * One subtle ordering requirement: if anything has gone wrong
	 * (transaction abort, IO errors, whatever), then we can still
	 * do these next steps (the fs will already have been marked as
	 * having errors), but we can't free the inode if the mark_dirty
	 * fails.
	 */
	if (ext3_mark_inode_dirty(handle, inode))
		/* If that failed, just do the required in-core inode clear. */
		clear_inode(inode);
	else
		ext3_free_inode(handle, inode);
	ext3_journal_stop(handle, inode);
	unlock_kernel();
	return;
no_delete:
	clear_inode(inode);	/* We must guarantee clearing of inode... */
}

void ext3_discard_prealloc (struct inode * inode)
{
#ifdef EXT3_PREALLOCATE
	lock_kernel();
	/* Writer: ->i_prealloc* */
	if (inode->u.ext3_i.i_prealloc_count) {
		unsigned short total = inode->u.ext3_i.i_prealloc_count;
		unsigned long block = inode->u.ext3_i.i_prealloc_block;
		inode->u.ext3_i.i_prealloc_count = 0;
		inode->u.ext3_i.i_prealloc_block = 0;
		/* Writer: end */
		ext3_free_blocks (inode, block, total);
	}
	unlock_kernel();
#endif
}

static int ext3_alloc_block (handle_t *handle,
			struct inode * inode, unsigned long goal, int *err)
{
#ifdef EXT3FS_DEBUG
	static unsigned long alloc_hits = 0, alloc_attempts = 0;
#endif
	unsigned long result;

#ifdef EXT3_PREALLOCATE
	/* Writer: ->i_prealloc* */
	if (inode->u.ext3_i.i_prealloc_count &&
	    (goal == inode->u.ext3_i.i_prealloc_block ||
	     goal + 1 == inode->u.ext3_i.i_prealloc_block))
	{
		result = inode->u.ext3_i.i_prealloc_block++;
		inode->u.ext3_i.i_prealloc_count--;
		/* Writer: end */
		ext3_debug ("preallocation hit (%lu/%lu).\n",
			    ++alloc_hits, ++alloc_attempts);
	} else {
		ext3_discard_prealloc (inode);
		ext3_debug ("preallocation miss (%lu/%lu).\n",
			    alloc_hits, ++alloc_attempts);
		if (S_ISREG(inode->i_mode))
			result = ext3_new_block (inode, goal,
				 &inode->u.ext3_i.i_prealloc_count,
				 &inode->u.ext3_i.i_prealloc_block, err);
		else
			result = ext3_new_block (inode, goal, 0, 0, err);
		/*
		 * AKPM: this is somewhat sticky.  I'm not surprised it was
		 * disabled in 2.2's ext3.  Need to integrate b_committed_data
		 * guarding with preallocation, if indeed preallocation is
		 * effective.
		 */
	}
#else
	result = ext3_new_block (handle, inode, goal, 0, 0, err);
#endif
	return result;
}

typedef struct {
	u32	*p;
	u32	key;
	struct buffer_head *bh;
} Indirect;

static inline void add_chain(Indirect *p, struct buffer_head *bh, u32 *v)
{
	p->key = *(p->p = v);
	p->bh = bh;
}

static inline int verify_chain(Indirect *from, Indirect *to)
{
	while (from <= to && from->key == *from->p)
		from++;
	return (from > to);
}
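/*
 * add_chain()/verify_chain() implement a small optimistic-read scheme:
 * each triple remembers both the location a pointer was read from (->p)
 * and the value seen there (->key).  verify_chain() re-reads *->p and
 * compares: if a concurrent truncate has changed any pointer on the
 * path, the comparison fails and the caller retries (-EAGAIN).
 */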
/**
 * ext3_block_to_path - parse the block number into array of offsets
 * @inode: inode in question (we are only interested in its superblock)
 * @i_block: block number to be parsed
 * @offsets: array to store the offsets in
 *
 * To store the locations of file's data ext3 uses a data structure common
 * for UNIX filesystems - tree of pointers anchored in the inode, with
 * data blocks at leaves and indirect blocks in intermediate nodes.
 * This function translates the block number into path in that tree -
 * return value is the path length and @offsets[n] is the offset of
 * pointer to (n+1)th node in the nth one.  If @i_block is out of range
 * (negative or too large) a warning is printed and zero returned.
 *
 * Note: function doesn't find node addresses, so no IO is needed.  All
 * we need to know is the capacity of indirect blocks (taken from the
 * inode->i_sb).
 */

/*
 * Portability note: the last comparison (check that we fit into triple
 * indirect block) is spelled differently, because otherwise on an
 * architecture with 32-bit longs and 8Kb pages we might get into trouble
 * if our filesystem had 8Kb blocks.  We might use long long, but that would
 * kill us on x86.  Oh, well, at least the sign propagation does not matter -
 * i_block would have to be negative in the very beginning, so we would not
 * get there at all.
 */
static int ext3_block_to_path(struct inode *inode, long i_block, int offsets[4])
{
	int ptrs = EXT3_ADDR_PER_BLOCK(inode->i_sb);
	int ptrs_bits = EXT3_ADDR_PER_BLOCK_BITS(inode->i_sb);
	const long direct_blocks = EXT3_NDIR_BLOCKS,
		indirect_blocks = ptrs,
		double_blocks = (1 << (ptrs_bits * 2));
	int n = 0;

	if (i_block < 0) {
		ext3_warning (inode->i_sb, "ext3_block_to_path", "block < 0");
	} else if (i_block < direct_blocks) {
		offsets[n++] = i_block;
	} else if ( (i_block -= direct_blocks) < indirect_blocks) {
		offsets[n++] = EXT3_IND_BLOCK;
		offsets[n++] = i_block;
	} else if ((i_block -= indirect_blocks) < double_blocks) {
		offsets[n++] = EXT3_DIND_BLOCK;
		offsets[n++] = i_block >> ptrs_bits;
		offsets[n++] = i_block & (ptrs - 1);
	} else if (((i_block -= double_blocks) >> (ptrs_bits * 2)) < ptrs) {
		offsets[n++] = EXT3_TIND_BLOCK;
		offsets[n++] = i_block >> (ptrs_bits * 2);
		offsets[n++] = (i_block >> ptrs_bits) & (ptrs - 1);
		offsets[n++] = i_block & (ptrs - 1);
	} else {
		ext3_warning (inode->i_sb, "ext3_block_to_path", "block > big");
	}
	return n;
}
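/*
 * Worked example (assuming 1KB blocks, so ptrs = 256):
 *
 *	i_block = 5:	{ 5 }			depth 1 (direct)
 *	i_block = 20:	{ EXT3_IND_BLOCK, 8 }	depth 2 (20 - 12 = 8)
 *	i_block = 300:	{ EXT3_DIND_BLOCK, 0, 32 }
 *			depth 3 (300 - 12 - 256 = 32;
 *				 32 >> 8 = 0, 32 & 255 = 32)
 */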
/**
 * ext3_get_branch - read the chain of indirect blocks leading to data
 * @inode: inode in question
 * @depth: depth of the chain (1 - direct pointer, etc.)
 * @offsets: offsets of pointers in inode/indirect blocks
 * @chain: place to store the result
 * @err: here we store the error value
 *
 * Function fills the array of triples <key, p, bh> and returns %NULL
 * if everything went OK or the pointer to the last filled triple
 * (incomplete one) otherwise.  Upon the return chain[i].key contains
 * the number of (i+1)-th block in the chain (as it is stored in memory,
 * i.e. little-endian 32-bit), chain[i].p contains the address of that
 * number (it points into struct inode for i==0 and into the bh->b_data
 * for i>0) and chain[i].bh points to the buffer_head of i-th indirect
 * block for i>0 and NULL for i==0.  In other words, it holds the block
 * numbers of the chain, addresses they were taken from (and where we can
 * verify that chain did not change) and buffer_heads hosting these
 * numbers.
 *
 * Function stops when it stumbles upon zero pointer (absent block)
 *	(pointer to last triple returned, *@err == 0)
 * or when it gets an IO error reading an indirect block
 *	(ditto, *@err == -EIO)
 * or when it notices that chain had been changed while it was reading
 *	(ditto, *@err == -EAGAIN)
 * or when it reads all @depth-1 indirect blocks successfully and finds
 * the whole chain, all the way to the data (returns %NULL, *@err == 0).
 */
static Indirect *ext3_get_branch(struct inode *inode, int depth, int *offsets,
				 Indirect chain[4], int *err)
{
	struct super_block *sb = inode->i_sb;
	Indirect *p = chain;
	struct buffer_head *bh;

	*err = 0;
	/* i_data is not going away, no lock needed */
	add_chain (chain, NULL, inode->u.ext3_i.i_data + *offsets);
	if (!p->key)
		goto no_block;
	while (--depth) {
		bh = sb_bread(sb, le32_to_cpu(p->key));
		if (!bh)
			goto failure;
		/* Reader: pointers */
		if (!verify_chain(chain, p))
			goto changed;
		add_chain(++p, bh, (u32*)bh->b_data + *++offsets);
		/* Reader: end */
		if (!p->key)
			goto no_block;
	}
	return NULL;

changed:
	brelse(bh);
	*err = -EAGAIN;
	goto no_block;
failure:
	*err = -EIO;
no_block:
	return p;
}
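/*
 * Example: for the depth-3 path { EXT3_DIND_BLOCK, 0, 32 } from the
 * ext3_block_to_path() example above, a fully resolved chain looks like
 *
 *	chain[0]: p = &i_data[EXT3_DIND_BLOCK], key = #dind, bh = NULL
 *	chain[1]: p = (u32*)bh1->b_data + 0,    key = #ind,  bh = bh1
 *	chain[2]: p = (u32*)bh2->b_data + 32,   key = #data, bh = bh2
 *
 * where bh1/bh2 hold the double-indirect and indirect blocks.
 */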
/**
 * ext3_find_near - find a place for allocation with sufficient locality
 * @inode: owner
 * @ind: descriptor of indirect block.
 *
 * This function returns the preferred place for block allocation.
 * It is used when the heuristic for sequential allocation fails.
 * Rules are:
 *   + if there is a block to the left of our position - allocate near it.
 *   + if pointer will live in indirect block - allocate near that block.
 *   + if pointer will live in inode - allocate in the same
 *     cylinder group.
 * Caller must make sure that @ind is valid and will stay that way.
 */
static inline unsigned long ext3_find_near(struct inode *inode, Indirect *ind)
{
	u32 *start = ind->bh ? (u32*) ind->bh->b_data : inode->u.ext3_i.i_data;
	u32 *p;

	/* Try to find previous block */
	for (p = ind->p - 1; p >= start; p--)
		if (*p)
			return le32_to_cpu(*p);

	/* No such thing, so let's try location of indirect block */
	if (ind->bh)
		return ind->bh->b_blocknr;

	/*
	 * Is it going to be referred to from the inode itself? OK, just put
	 * it into the same cylinder group then.
	 */
	return (inode->u.ext3_i.i_block_group *
		EXT3_BLOCKS_PER_GROUP(inode->i_sb)) +
	       le32_to_cpu(inode->i_sb->u.ext3_sb.s_es->s_first_data_block);
}

/**
 * ext3_find_goal - find a preferred place for allocation.
 * @inode: owner
 * @block: block we want
 * @chain: chain of indirect blocks
 * @partial: pointer to the last triple within a chain
 * @goal: place to store the result.
 *
 * Normally this function finds the preferred place for block allocation,
 * stores it in *@goal and returns zero.  If the branch had been changed
 * under us we return -EAGAIN.
 */
static int ext3_find_goal(struct inode *inode, long block, Indirect chain[4],
			  Indirect *partial, unsigned long *goal)
{
	/* Writer: ->i_next_alloc* */
	if (block == inode->u.ext3_i.i_next_alloc_block + 1) {
		inode->u.ext3_i.i_next_alloc_block++;
		inode->u.ext3_i.i_next_alloc_goal++;
	}
#ifdef SEARCH_FROM_ZERO
	inode->u.ext3_i.i_next_alloc_block = 0;
	inode->u.ext3_i.i_next_alloc_goal = 0;
#endif
	/* Writer: end */
	/* Reader: pointers, ->i_next_alloc* */
	if (verify_chain(chain, partial)) {
		/*
		 * try the heuristic for sequential allocation,
		 * failing that at least try to get decent locality.
		 */
		if (block == inode->u.ext3_i.i_next_alloc_block)
			*goal = inode->u.ext3_i.i_next_alloc_goal;
		if (!*goal)
			*goal = ext3_find_near(inode, partial);
#ifdef SEARCH_FROM_ZERO
		*goal = 0;
#endif
		return 0;
	}
	/* Reader: end */
	return -EAGAIN;
}
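/*
 * Example: for a file being written strictly sequentially, each request
 * satisfies block == i_next_alloc_block, so *goal becomes
 * i_next_alloc_goal - one past the block allocated last time - and
 * allocation stays contiguous.  A non-sequential write misses and falls
 * back to ext3_find_near(), which prefers the closest allocated block
 * to the left, then the indirect block itself, then the inode's group.
 */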
/**
 * ext3_alloc_branch - allocate and set up a chain of blocks.
 * @inode: owner
 * @num: depth of the chain (number of blocks to allocate)
 * @offsets: offsets (in the blocks) to store the pointers to next.
 * @branch: place to store the chain in.
 *
 * This function allocates @num blocks, zeroes out all but the last one,
 * links them into chain and (if we are synchronous) writes them to disk.
 * In other words, it prepares a branch that can be spliced onto the
 * inode.  It stores the information about that chain in the branch[], in
 * the same format as ext3_get_branch() would do.  We are calling it after
 * we had read the existing part of chain and partial points to the last
 * triple of that (one with zero ->key).  Upon exit we have the same
 * picture as after the successful ext3_get_block(), except that in one
 * place chain is disconnected - *branch->p is still zero (we did not
 * set the last link), but branch->key contains the number that should
 * be placed into *branch->p to fill that gap.
 *
 * If allocation fails we free all blocks we've allocated (and forget
 * their buffer_heads) and return the error value from the failed
 * ext3_alloc_block() (normally -ENOSPC).  Otherwise we set the chain
 * as described above and return 0.
 */
static int ext3_alloc_branch(handle_t *handle, struct inode *inode,
			     int num,
			     unsigned long goal,
			     int *offsets,
			     Indirect *branch)
{
	int blocksize = inode->i_sb->s_blocksize;
	int n = 0, keys = 0;
	int err = 0;
	int i;
	int parent = ext3_alloc_block(handle, inode, goal, &err);

	branch[0].key = cpu_to_le32(parent);
	if (parent) {
		for (n = 1; n < num; n++) {
			struct buffer_head *bh;
			/* Allocate the next block */
			int nr = ext3_alloc_block(handle, inode, parent, &err);
			if (!nr)
				break;
			branch[n].key = cpu_to_le32(nr);
			keys = n+1;

			/*
			 * Get buffer_head for parent block, zero it out
			 * and set the pointer to new one, then send
			 * parent to disk.
			 */
			bh = sb_getblk(inode->i_sb, parent);
			branch[n].bh = bh;
			lock_buffer(bh);
			BUFFER_TRACE(bh, "call get_create_access");
			err = ext3_journal_get_create_access(handle, bh);
			if (err) {
				unlock_buffer(bh);
				brelse(bh);
				break;
			}

			memset(bh->b_data, 0, blocksize);
			branch[n].p = (u32*) bh->b_data + offsets[n];
			*branch[n].p = branch[n].key;
			BUFFER_TRACE(bh, "marking uptodate");
			mark_buffer_uptodate(bh, 1);
			unlock_buffer(bh);

			BUFFER_TRACE(bh, "call ext3_journal_dirty_metadata");
			err = ext3_journal_dirty_metadata(handle, bh);
			if (err)
				break;

			parent = nr;
		}
	}
	if (n == num)
		return 0;

	/* Allocation failed, free what we already allocated */
	for (i = 1; i < keys; i++) {
		BUFFER_TRACE(branch[i].bh, "call journal_forget");
		ext3_journal_forget(handle, branch[i].bh);
	}
	for (i = 0; i < keys; i++)
		ext3_free_blocks(handle, inode, le32_to_cpu(branch[i].key), 1);
	return err;
}
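/*
 * Sketch of the result for num == 3:
 *
 *	branch[0].key = #A	(*branch[0].p, in the inode or in the
 *				 indirect block above us, is still zero -
 *				 that is the missing link)
 *	branch[1].key = #B,	branch[1].p points into block #A
 *	branch[2].key = #C,	branch[2].p points into block #B
 *
 * ext3_splice_branch() below is what finally sets
 * *branch[0].p = branch[0].key.
 */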
/**
 * ext3_splice_branch - splice the allocated branch onto inode.
 * @inode: owner
 * @block: (logical) number of block we are adding
 * @chain: chain of indirect blocks (with a missing link - see
 *	ext3_alloc_branch)
 * @where: location of missing link
 * @num: number of blocks we are adding
 *
 * This function verifies that chain (up to the missing link) had not
 * changed, fills the missing link and does all housekeeping needed in
 * inode (->i_blocks, etc.).  In case of success we end up with the full
 * chain to new block and return 0.  Otherwise (== chain had been changed)
 * we free the new blocks (forgetting their buffer_heads, indeed) and
 * return -EAGAIN.
 */
static int ext3_splice_branch(handle_t *handle, struct inode *inode, long block,
			      Indirect chain[4], Indirect *where, int num)
{
	int i;
	int err = 0;

	/*
	 * If we're splicing into a [td]indirect block (as opposed to the
	 * inode) then we need to get write access to the [td]indirect block
	 * before the splice.
	 */
	if (where->bh) {
		BUFFER_TRACE(where->bh, "get_write_access");
		err = ext3_journal_get_write_access(handle, where->bh);
		if (err)
			goto err_out;
	}
	/* Verify that place we are splicing to is still there and vacant */

	/* Writer: pointers, ->i_next_alloc* */
	if (!verify_chain(chain, where-1) || *where->p)
		/* Writer: end */
		goto changed;

	/* That's it */

	*where->p = where->key;
	inode->u.ext3_i.i_next_alloc_block = block;
	inode->u.ext3_i.i_next_alloc_goal = le32_to_cpu(where[num-1].key);
#ifdef SEARCH_FROM_ZERO
	inode->u.ext3_i.i_next_alloc_block = 0;
	inode->u.ext3_i.i_next_alloc_goal = 0;
#endif
	/* Writer: end */

	/* We are done with atomic stuff, now do the rest of housekeeping */

	inode->i_ctime = CURRENT_TIME;
	ext3_mark_inode_dirty(handle, inode);

	/* had we spliced it onto indirect block? */
	if (where->bh) {
		/*
		 * akpm: If we spliced it onto an indirect block, we haven't
		 * altered the inode.  Note however that if it is being spliced
		 * onto an indirect block at the very end of the file (the
		 * file is growing) then we *will* alter the inode to reflect
		 * the new i_size.  But that is not done here - it is done in
		 * generic_commit_write->__mark_inode_dirty->ext3_dirty_inode.
		 */
		jbd_debug(5, "splicing indirect only\n");
		BUFFER_TRACE(where->bh, "call ext3_journal_dirty_metadata");
		err = ext3_journal_dirty_metadata(handle, where->bh);
		if (err)
			goto err_out;
	} else {
		/*
		 * OK, we spliced it into the inode itself on a direct block.
		 * Inode was dirtied above.
		 */
		jbd_debug(5, "splicing direct\n");
	}
	return err;

changed:
	/*
	 * AKPM: if where[i].bh isn't part of the current updating
	 * transaction then we explode nastily.  Test this code path.
	 */
	jbd_debug(1, "the chain changed: try again\n");
	err = -EAGAIN;

err_out:
	for (i = 1; i < num; i++) {
		BUFFER_TRACE(where[i].bh, "call journal_forget");
		ext3_journal_forget(handle, where[i].bh);
	}
	/* For the normal collision cleanup case, we free up the blocks.
	 * On genuine filesystem errors we don't even think about doing
	 * that. */
	if (err == -EAGAIN)
		for (i = 0; i < num; i++)
			ext3_free_blocks(handle, inode,
					 le32_to_cpu(where[i].key), 1);
	return err;
}
/*
 * Allocation strategy is simple: if we have to allocate something, we will
 * have to go the whole way to leaf.  So let's do it before attaching anything
 * to tree, set linkage between the newborn blocks, write them if sync is
 * required, recheck the path, free and repeat if check fails, otherwise
 * set the last missing link (that will protect us from any truncate-generated
 * removals - all blocks on the path are immune now) and possibly force the
 * write on the parent block.
 * That has a nice additional property: no special recovery from the failed
 * allocations is needed - we simply release blocks and do not touch anything
 * reachable from inode.
 *
 * akpm: `handle' can be NULL if create == 0.
 *
 * The BKL may not be held on entry here.  Be sure to take it early.
 */
static int ext3_get_block_handle(handle_t *handle, struct inode *inode,
				 long iblock,
				 struct buffer_head *bh_result, int create)
{
	int err = -EIO;
	int offsets[4];
	Indirect chain[4];
	Indirect *partial;
	unsigned long goal;
	int left;
	int depth = ext3_block_to_path(inode, iblock, offsets);
	loff_t new_size;

	J_ASSERT(handle != NULL || create == 0);

	if (depth == 0)
		goto out;

	lock_kernel();
reread:
	partial = ext3_get_branch(inode, depth, offsets, chain, &err);

	/* Simplest case - block found, no allocation needed */
	if (!partial) {
		bh_result->b_state &= ~(1UL << BH_New);
got_it:
		bh_result->b_dev = inode->i_dev;
		bh_result->b_blocknr = le32_to_cpu(chain[depth-1].key);
		bh_result->b_state |= (1UL << BH_Mapped);
		/* Clean up and exit */
		partial = chain+depth-1; /* the whole chain */
		goto cleanup;
	}

	/* Next simple case - plain lookup or failed read of indirect block */
	if (!create || err == -EIO) {
cleanup:
		while (partial > chain) {
			BUFFER_TRACE(partial->bh, "call brelse");
			brelse(partial->bh);
			partial--;
		}
		BUFFER_TRACE(bh_result, "returned");
		unlock_kernel();
out:
		return err;
	}

	/*
	 * Indirect block might be removed by truncate while we were
	 * reading it.  Handling of that case (forget what we've got and
	 * reread) is taken out of the main path.
	 */
	if (err == -EAGAIN)
		goto changed;

	if (ext3_find_goal(inode, iblock, chain, partial, &goal) < 0)
		goto changed;

	left = (chain + depth) - partial;

	/*
	 * Block out ext3_truncate while we alter the tree
	 */
	down_read(&inode->u.ext3_i.truncate_sem);
	err = ext3_alloc_branch(handle, inode, left, goal,
				offsets+(partial-chain), partial);

	/* The ext3_splice_branch call will free and forget any buffers
	 * on the new chain if there is a failure, but that risks using
	 * up transaction credits, especially for bitmaps where the
	 * credits cannot be returned.  Can we handle this somehow?  We
	 * may need to return -EAGAIN upwards in the worst case. --sct */
	if (!err)
		err = ext3_splice_branch(handle, inode, iblock, chain,
					 partial, left);
	up_read(&inode->u.ext3_i.truncate_sem);
	if (err == -EAGAIN)
		goto changed;
	if (err)
		goto cleanup;

	new_size = inode->i_size;
	/*
	 * This is not racy against ext3_truncate's modification of i_disksize
	 * because VM/VFS ensures that the file cannot be extended while
	 * truncate is in progress.  It is racy between multiple parallel
	 * instances of get_block, but we have the BKL.
	 */
	if (new_size > inode->u.ext3_i.i_disksize)
		inode->u.ext3_i.i_disksize = new_size;

	bh_result->b_state |= (1UL << BH_New);
	goto got_it;

changed:
	while (partial > chain) {
		jbd_debug(1, "buffer chain changed, retrying\n");
		BUFFER_TRACE(partial->bh, "brelsing");
		brelse(partial->bh);
		partial--;
	}
	goto reread;
}

/*
 * The BKL is not held on entry here.
 */
static int ext3_get_block(struct inode *inode, long iblock,
			  struct buffer_head *bh_result, int create)
{
	handle_t *handle = 0;
	int ret;

	if (create) {
		handle = ext3_journal_current_handle();
		J_ASSERT(handle != 0);
	}
	ret = ext3_get_block_handle(handle, inode, iblock, bh_result, create);
	return ret;
}
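/*
 * For orientation: generic helpers such as block_prepare_write() and
 * block_write_full_page() call this with create == 1, from inside a
 * transaction started by ext3_prepare_write()/ext3_writepage(), while
 * block_read_full_page() and generic_block_bmap() use create == 0 -
 * hence the NULL-handle case asserted above.
 */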
/*
 * `handle' can be NULL if create is zero
 */
struct buffer_head *ext3_getblk(handle_t *handle, struct inode * inode,
				long block, int create, int * errp)
{
	struct buffer_head dummy;
	int fatal = 0, err;

	J_ASSERT(handle != NULL || create == 0);

	dummy.b_state = 0;
	dummy.b_blocknr = -1000;
	buffer_trace_init(&dummy.b_history);
	*errp = ext3_get_block_handle(handle, inode, block, &dummy, create);
	if (!*errp && buffer_mapped(&dummy)) {
		struct buffer_head *bh;
		bh = sb_getblk(inode->i_sb, dummy.b_blocknr);
		if (buffer_new(&dummy)) {
			J_ASSERT(create != 0);
			J_ASSERT(handle != 0);

			/* Now that we do not always journal data, we
			   should keep in mind whether this should
			   always journal the new buffer as metadata.
			   For now, regular file writes use
			   ext3_get_block instead, so it's not a
			   problem. */
			lock_kernel();
			lock_buffer(bh);
			BUFFER_TRACE(bh, "call get_create_access");
			fatal = ext3_journal_get_create_access(handle, bh);
			if (!fatal) {
				memset(bh->b_data, 0,
				       inode->i_sb->s_blocksize);
				mark_buffer_uptodate(bh, 1);
			}
			unlock_buffer(bh);
			BUFFER_TRACE(bh, "call ext3_journal_dirty_metadata");
			err = ext3_journal_dirty_metadata(handle, bh);
			if (!fatal) fatal = err;
			unlock_kernel();
		} else {
			BUFFER_TRACE(bh, "not a new buffer");
		}
		if (fatal) {
			*errp = fatal;
			brelse(bh);
			bh = NULL;
		}
		return bh;
	}
	return NULL;
}

struct buffer_head *ext3_bread(handle_t *handle, struct inode * inode,
			       int block, int create, int *err)
{
	struct buffer_head * bh;
	int prev_blocks;

	prev_blocks = inode->i_blocks;

	bh = ext3_getblk (handle, inode, block, create, err);
	if (!bh)
		return bh;
#ifdef EXT3_PREALLOCATE
	/*
	 * If the inode has grown, and this is a directory, then use a few
	 * more of the preallocated blocks to keep directory fragmentation
	 * down.  The preallocated blocks are guaranteed to be contiguous.
	 */
	if (create &&
	    S_ISDIR(inode->i_mode) &&
	    inode->i_blocks > prev_blocks &&
	    EXT3_HAS_COMPAT_FEATURE(inode->i_sb,
				    EXT3_FEATURE_COMPAT_DIR_PREALLOC)) {
		int i;
		struct buffer_head *tmp_bh;

		for (i = 1;
		     inode->u.ext3_i.i_prealloc_count &&
		     i < EXT3_SB(inode->i_sb)->s_es->s_prealloc_dir_blocks;
		     i++) {
			/*
			 * ext3_getblk will zero out the contents of the
			 * directory for us
			 */
			tmp_bh = ext3_getblk(handle, inode,
					     block+i, create, err);
			if (!tmp_bh) {
				brelse (bh);
				return 0;
			}
			brelse (tmp_bh);
		}
	}
#endif
	if (buffer_uptodate(bh))
		return bh;
	ll_rw_block (READ, 1, &bh);
	wait_on_buffer (bh);
	if (buffer_uptodate(bh))
		return bh;
	brelse (bh);
	*err = -EIO;
	return NULL;
}

static int walk_page_buffers(handle_t *handle,
			     struct inode *inode,
			     struct buffer_head *head,
			     unsigned from,
			     unsigned to,
			     int *partial,
			     int (*fn)(handle_t *handle,
				       struct inode *inode,
				       struct buffer_head *bh))
{
	struct buffer_head *bh;
	unsigned block_start, block_end;
	unsigned blocksize = head->b_size;
	int err, ret = 0;

	for (bh = head, block_start = 0;
	     ret == 0 && (bh != head || !block_start);
	     block_start = block_end, bh = bh->b_this_page) {
		block_end = block_start + blocksize;
		if (block_end <= from || block_start >= to) {
			if (partial && !buffer_uptodate(bh))
				*partial = 1;
			continue;
		}
		err = (*fn)(handle, inode, bh);
		if (!ret)
			ret = err;
	}
	return ret;
}
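/*
 * Note: the buffers on a page form a circular list (b_this_page
 * eventually points back to head), which is why the loop above
 * terminates when bh wraps around to head with block_start != 0,
 * rather than on a NULL pointer.
 */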
/*
 * To preserve ordering, it is essential that the hole instantiation and
 * the data write be encapsulated in a single transaction.  We cannot
 * close off a transaction and start a new one between the ext3_get_block()
 * and the commit_write().  So doing the journal_start at the start of
 * prepare_write() is the right place.
 *
 * Also, this function can nest inside ext3_writepage() ->
 * block_write_full_page().  In that case, we *know* that ext3_writepage()
 * has generated enough buffer credits to do the whole page.  So we won't
 * block on the journal in that case, which is good, because the caller may
 * be PF_MEMALLOC.
 *
 * By accident, ext3 can be reentered when a transaction is open via
 * quota file writes.  If we were to commit the transaction while thus
 * reentered, there can be a deadlock - we would be holding a quota
 * lock, and the commit would never complete if another thread had a
 * transaction open and was blocking on the quota lock - a ranking
 * violation.
 *
 * So what we do is to rely on the fact that journal_stop/journal_start
 * will _not_ run commit under these circumstances because handle->h_ref
 * is elevated.  We'll still have enough credits for the tiny quotafile
 * write.
 */
static int do_journal_get_write_access(handle_t *handle, struct inode *inode,
				       struct buffer_head *bh)
{
	return ext3_journal_get_write_access(handle, bh);
}

static int ext3_prepare_write(struct file *file, struct page *page,
			      unsigned from, unsigned to)
{
	struct inode *inode = page->mapping->host;
	int ret, needed_blocks = ext3_writepage_trans_blocks(inode);
	handle_t *handle;

	lock_kernel();
	handle = ext3_journal_start(inode, needed_blocks);
	if (IS_ERR(handle)) {
		ret = PTR_ERR(handle);
		goto out;
	}
	unlock_kernel();
	ret = block_prepare_write(page, from, to, ext3_get_block);
	lock_kernel();
	if (ret != 0)
		goto prepare_write_failed;

	if (ext3_should_journal_data(inode)) {
		ret = walk_page_buffers(handle, inode, page->buffers,
				from, to, NULL, do_journal_get_write_access);
		if (ret) {
			/*
			 * We're going to fail this prepare_write(),
			 * so commit_write() will not be called.
			 * We need to undo block_prepare_write()'s kmap().
			 * AKPM: Do we need to clear PageUptodate?  I don't
			 * think so.
			 */
			kunmap(page);
		}
	}
prepare_write_failed:
	if (ret)
		ext3_journal_stop(handle, inode);
out:
	unlock_kernel();
	return ret;
}
static int journal_dirty_sync_data(handle_t *handle, struct inode *inode,
				   struct buffer_head *bh)
{
	int ret = ext3_journal_dirty_data(handle, bh, 0);
	buffer_insert_inode_data_queue(bh, inode);
	return ret;
}

/*
 * For ext3_writepage().  We also brelse() the buffer to account for
 * the bget() which ext3_writepage() performs.
 */
static int journal_dirty_async_data(handle_t *handle, struct inode *inode,
				    struct buffer_head *bh)
{
	int ret = ext3_journal_dirty_data(handle, bh, 1);
	buffer_insert_inode_data_queue(bh, inode);
	__brelse(bh);
	return ret;
}

/* For commit_write() in data=journal mode */
static int commit_write_fn(handle_t *handle, struct inode *inode,
			   struct buffer_head *bh)
{
	set_bit(BH_Uptodate, &bh->b_state);
	return ext3_journal_dirty_metadata(handle, bh);
}

/*
 * We need to pick up the new inode size which generic_commit_write gave us.
 * `file' can be NULL - eg, when called from block_symlink().
 *
 * ext3 inode->i_dirty_buffers policy: If we're journalling data we
 * definitely don't want them to appear on the inode at all - instead
 * we need to manage them at the JBD layer and we need to intercept
 * the relevant sync operations and translate them into journal operations.
 *
 * If we're not journalling data then we can just leave the buffers
 * on ->i_dirty_buffers.  If someone writes them out for us then thanks.
 * Otherwise we'll do it in commit, if we're using ordered data.
 */
static int ext3_commit_write(struct file *file, struct page *page,
			     unsigned from, unsigned to)
{
	handle_t *handle = ext3_journal_current_handle();
	struct inode *inode = page->mapping->host;
	int ret = 0, ret2;

	lock_kernel();
	if (ext3_should_journal_data(inode)) {
		/*
		 * Here we duplicate the generic_commit_write() functionality
		 */
		int partial = 0;
		loff_t pos = ((loff_t)page->index << PAGE_CACHE_SHIFT) + to;

		ret = walk_page_buffers(handle, inode, page->buffers,
			from, to, &partial, commit_write_fn);
		if (!partial)
			SetPageUptodate(page);
		kunmap(page);
		if (pos > inode->i_size)
			inode->i_size = pos;
		EXT3_I(inode)->i_state |= EXT3_STATE_JDATA;
	} else {
		if (ext3_should_order_data(inode)) {
			ret = walk_page_buffers(handle, inode, page->buffers,
				from, to, NULL, journal_dirty_sync_data);
		}
		/* Be careful here if generic_commit_write becomes a
		 * required invocation after block_prepare_write. */
		if (ret == 0) {
			ret = generic_commit_write(file, page, from, to);
		} else {
			/*
			 * block_prepare_write() was called, but we're not
			 * going to call generic_commit_write().  So we
			 * need to perform generic_commit_write()'s kunmap
			 * by hand.
			 */
			kunmap(page);
		}
	}
	if (inode->i_size > inode->u.ext3_i.i_disksize) {
		inode->u.ext3_i.i_disksize = inode->i_size;
		ret2 = ext3_mark_inode_dirty(handle, inode);
		if (!ret)
			ret = ret2;
	}
	ret2 = ext3_journal_stop(handle, inode);
	unlock_kernel();
	if (!ret)
		ret = ret2;
	return ret;
}
/*
 * bmap() is special.  It gets used by applications such as lilo and by
 * the swapper to find the on-disk block of a specific piece of data.
 *
 * Naturally, this is dangerous if the block concerned is still in the
 * journal.  If somebody makes a swapfile on an ext3 data-journaling
 * filesystem and enables swap, then they may get a nasty shock when the
 * data getting swapped to that swapfile suddenly gets overwritten by
 * the original zeros written out previously to the journal and
 * awaiting writeback in the kernel's buffer cache.
 *
 * So, if we see any bmap calls here on a modified, data-journaled file,
 * take extra steps to flush any blocks which might be in the cache.
 */
static int ext3_bmap(struct address_space *mapping, long block)
{
	struct inode *inode = mapping->host;
	journal_t *journal;
	int err;

	if (EXT3_I(inode)->i_state & EXT3_STATE_JDATA) {
		/*
		 * This is a REALLY heavyweight approach, but the use of
		 * bmap on dirty files is expected to be extremely rare:
		 * only if we run lilo or swapon on a freshly made file
		 * do we expect this to happen.
		 *
		 * (bmap requires CAP_SYS_RAWIO so this does not
		 * represent an unprivileged user DoS attack --- we'd be
		 * in trouble if mortal users could trigger this path at
		 * will.)
		 *
		 * NB. EXT3_STATE_JDATA is not set on files other than
		 * regular files.  If somebody wants to bmap a directory
		 * or symlink and gets confused because the buffer
		 * hasn't yet been flushed to disk, they deserve
		 * everything they get.
		 */
		EXT3_I(inode)->i_state &= ~EXT3_STATE_JDATA;
		journal = EXT3_JOURNAL(inode);
		journal_lock_updates(journal);
		err = journal_flush(journal);
		journal_unlock_updates(journal);

		if (err)
			return 0;
	}

	return generic_block_bmap(mapping, block, ext3_get_block);
}
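/*
 * For reference, the userspace side of this is the FIBMAP ioctl
 * (a sketch - error handling omitted; requires CAP_SYS_RAWIO):
 *
 *	int blk = logical_block_nr;
 *	ioctl(fd, FIBMAP, &blk);	// blk now holds the disk block
 *
 * which is how lilo builds its map file.
 */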
static int bget_one(handle_t *handle, struct inode *inode,
		    struct buffer_head *bh)
{
	atomic_inc(&bh->b_count);
	return 0;
}

static int ext3_writepage(struct page *page)
{
	struct inode *inode = page->mapping->host;
	struct buffer_head *page_buffers;
	handle_t *handle = NULL;
	int ret = 0, err;
	int needed;
	int order_data;

	J_ASSERT(PageLocked(page));

	/*
	 * We give up here if we're reentered, because it might be
	 * for a different filesystem.  One *could* look for a
	 * nested transaction opportunity.
	 */
	lock_kernel();
	if (ext3_journal_current_handle())
		goto out_fail;

	needed = ext3_writepage_trans_blocks(inode);
	if (current->flags & PF_MEMALLOC)
		handle = ext3_journal_try_start(inode, needed);
	else
		handle = ext3_journal_start(inode, needed);

	if (IS_ERR(handle)) {
		ret = PTR_ERR(handle);
		goto out_fail;
	}

	order_data = ext3_should_order_data(inode) ||
		ext3_should_journal_data(inode);

	unlock_kernel();

	page_buffers = NULL;	/* Purely to prevent compiler warning */

	/* bget() all the buffers */
	if (order_data) {
		if (!page->buffers)
			create_empty_buffers(page,
				inode->i_dev, inode->i_sb->s_blocksize);
		page_buffers = page->buffers;
		walk_page_buffers(handle, inode, page_buffers, 0,
				PAGE_CACHE_SIZE, NULL, bget_one);
	}

	ret = block_write_full_page(page, ext3_get_block);

	/*
	 * The page can become unlocked at any point now, and
	 * truncate can then come in and change things.  So we
	 * can't touch *page from now on.  But *page_buffers is
	 * safe due to elevated refcount.
	 */
	handle = ext3_journal_current_handle();
	lock_kernel();

	/* And attach them to the current transaction */
	if (order_data) {
		err = walk_page_buffers(handle, inode, page_buffers,
			0, PAGE_CACHE_SIZE, NULL, journal_dirty_async_data);
		if (!ret)
			ret = err;
	}

	err = ext3_journal_stop(handle, inode);
	if (!ret)
		ret = err;
	unlock_kernel();
	return ret;

out_fail:
	unlock_kernel();
	SetPageDirty(page);
	UnlockPage(page);
	return ret;
}

static int ext3_readpage(struct file *file, struct page *page)
{
	return block_read_full_page(page, ext3_get_block);
}

static int ext3_flushpage(struct page *page, unsigned long offset)
{
	journal_t *journal = EXT3_JOURNAL(page->mapping->host);
	return journal_flushpage(journal, page, offset);
}

static int ext3_releasepage(struct page *page, int wait)
{
	journal_t *journal = EXT3_JOURNAL(page->mapping->host);
	return journal_try_to_free_buffers(journal, page, wait);
}

struct address_space_operations ext3_aops = {
	readpage:	ext3_readpage,		/* BKL not held.  Don't need */
	writepage:	ext3_writepage,		/* BKL not held.  We take it */
	sync_page:	block_sync_page,
	prepare_write:	ext3_prepare_write,	/* BKL not held.  We take it */
	commit_write:	ext3_commit_write,	/* BKL not held.  We take it */
	bmap:		ext3_bmap,		/* BKL held */
	flushpage:	ext3_flushpage,		/* BKL not held.  Don't need */
	releasepage:	ext3_releasepage,	/* BKL not held.  Don't need */
};
/*
 * ext3_block_truncate_page() zeroes out a mapping from file offset `from'
 * up to the end of the block which corresponds to `from'.
 * This is required during truncate.  We need to physically zero the tail
 * end of that block so it doesn't yield old data if the file is later grown.
 */
static int ext3_block_truncate_page(handle_t *handle,
		struct address_space *mapping, loff_t from)
{
	unsigned long index = from >> PAGE_CACHE_SHIFT;
	unsigned offset = from & (PAGE_CACHE_SIZE-1);
	unsigned blocksize, iblock, length, pos;
	struct inode *inode = mapping->host;
	struct page *page;
	struct buffer_head *bh;
	int err;

	blocksize = inode->i_sb->s_blocksize;
	length = offset & (blocksize - 1);

	/* Block boundary? Nothing to do */
	if (!length)
		return 0;

	length = blocksize - length;
	iblock = index << (PAGE_CACHE_SHIFT - inode->i_sb->s_blocksize_bits);

	page = grab_cache_page(mapping, index);
	err = -ENOMEM;
	if (!page)
		goto out;

	if (!page->buffers)
		create_empty_buffers(page, inode->i_dev, blocksize);

	/* Find the buffer that contains "offset" */
	bh = page->buffers;
	pos = blocksize;
	while (offset >= pos) {
		bh = bh->b_this_page;
		iblock++;
		pos += blocksize;
	}

	err = 0;
	if (!buffer_mapped(bh)) {
		/* Hole? Nothing to do */
		if (buffer_uptodate(bh))
			goto unlock;
		ext3_get_block(inode, iblock, bh, 0);
		/* Still unmapped? Nothing to do */
		if (!buffer_mapped(bh))
			goto unlock;
	}

	/* Ok, it's mapped. Make sure it's up-to-date */
	if (Page_Uptodate(page))
		set_bit(BH_Uptodate, &bh->b_state);

	if (!buffer_uptodate(bh)) {
		err = -EIO;
		ll_rw_block(READ, 1, &bh);
		wait_on_buffer(bh);
		/* Uhhuh. Read error. Complain and punt. */
		if (!buffer_uptodate(bh))
			goto unlock;
	}

	if (ext3_should_journal_data(inode)) {
		BUFFER_TRACE(bh, "get write access");
		err = ext3_journal_get_write_access(handle, bh);
		if (err)
			goto unlock;
	}

	memset(kmap(page) + offset, 0, length);
	flush_dcache_page(page);
	kunmap(page);

	BUFFER_TRACE(bh, "zeroed end of block");

	err = 0;
	if (ext3_should_journal_data(inode)) {
		err = ext3_journal_dirty_metadata(handle, bh);
	} else {
		if (ext3_should_order_data(inode))
			err = ext3_journal_dirty_data(handle, bh, 0);
		__mark_buffer_dirty(bh);
	}

unlock:
	UnlockPage(page);
	page_cache_release(page);
out:
	return err;
}
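/*
 * Worked example (assuming 4KB pages and 1KB blocks): truncating to
 * from = 5000 gives index = 1 and offset = 5000 & 4095 = 904; within
 * its block, 904 & 1023 = 904, so length = 1024 - 904 = 120 bytes are
 * zeroed at the tail of logical block 4.
 */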
/*
 * Probably it should be a library function... search for first non-zero word
 * or memcmp with zero_page, whatever is better for particular architecture.
 * Linus?
 */
static inline int all_zeroes(u32 *p, u32 *q)
{
	while (p < q)
		if (*p++)
			return 0;
	return 1;
}

/**
 * ext3_find_shared - find the indirect blocks for partial truncation.
 * @inode: inode in question
 * @depth: depth of the affected branch
 * @offsets: offsets of pointers in that branch (see ext3_block_to_path)
 * @chain: place to store the pointers to partial indirect blocks
 * @top: place to store the (detached) top of the branch
 *
 * This is a helper function used by ext3_truncate().
 *
 * When we do truncate() we may have to clean the ends of several
 * indirect blocks but leave the blocks themselves alive.  A block is
 * partially truncated if some data below the new i_size is referred
 * to from it (and it is on the path to the first completely truncated
 * data block, indeed).  We have to free the top of that path along
 * with everything to the right of the path.  Since no allocation
 * past the truncation point is possible until ext3_truncate()
 * finishes, we may safely do the latter, but top of branch may
 * require special attention - pageout below the truncation point
 * might try to populate it.
 *
 * We atomically detach the top of branch from the tree, store the
 * block number of its root in *@top, pointers to buffer_heads of
 * partially truncated blocks - in @chain[].bh and pointers to
 * their last elements that should not be removed - in
 * @chain[].p.  Return value is the pointer to last filled element
 * of @chain.
 *
 * The work is left to the caller to do the actual freeing of subtrees:
 *	a) free the subtree starting from *@top
 *	b) free the subtrees whose roots are stored in
 *		(@chain[i].p+1 .. end of @chain[i].bh->b_data)
 *	c) free the subtrees growing from the inode past the @chain[0]
 *		(no partially truncated stuff there).
 */
static Indirect *ext3_find_shared(struct inode *inode,
				int depth,
				int offsets[4],
				Indirect chain[4],
				u32 *top)
{
	Indirect *partial, *p;
	int k, err;

	*top = 0;
	/* Make k index the deepest non-null offset + 1 */
	for (k = depth; k > 1 && !offsets[k-1]; k--)
		;
	partial = ext3_get_branch(inode, k, offsets, chain, &err);
	/* Writer: pointers */
	if (!partial)
		partial = chain + k-1;
	/*
	 * If the branch acquired continuation since we've looked at it -
	 * fine, it should all survive and (new) top doesn't belong to us.
	 */
	if (!partial->key && *partial->p)
		/* Writer: end */
		goto no_top;
	for (p=partial; p>chain && all_zeroes((u32*)p->bh->b_data,p->p); p--)
		;
	/*
	 * OK, we've found the last block that must survive.  The rest of our
	 * branch should be detached before unlocking.  However, if that rest
	 * of branch is all ours and does not grow immediately from the inode
	 * it's easier to cheat and just decrement partial->p.
	 */
	if (p == chain + k - 1 && p > chain) {
		p->p--;
	} else {
		*top = *p->p;
		/* Nope, don't do this in ext3.  Must leave the tree intact */
	}
	/* Writer: end */

	while (partial > p) {
		brelse(partial->bh);
		partial--;
	}
no_top:
	return partial;
}
/*
 * Zero a number of block pointers in either an inode or an indirect block.
 * If we restart the transaction we must again get write access to the
 * indirect block for further modification.
 *
 * We release `count' blocks on disk, but (last - first) may be greater
 * than `count' because there can be holes in there.
 */
static void
ext3_clear_blocks(handle_t *handle, struct inode *inode, struct buffer_head *bh,
		unsigned long block_to_free, unsigned long count,
		u32 *first, u32 *last)
{
	u32 *p;
	if (try_to_extend_transaction(handle, inode)) {
		if (bh) {
			BUFFER_TRACE(bh, "call ext3_journal_dirty_metadata");
			ext3_journal_dirty_metadata(handle, bh);
		}
		ext3_mark_inode_dirty(handle, inode);
		ext3_journal_test_restart(handle, inode);
		if (bh) {
			BUFFER_TRACE(bh, "retaking write access");
			ext3_journal_get_write_access(handle, bh);
		}
	}

	/*
	 * Any buffers which are on the journal will be in memory.  We find
	 * them on the hash table so journal_revoke() will run journal_forget()
	 * on them.  We've already detached each block from the file, so
	 * bforget() in journal_forget() should be safe.
	 *
	 * AKPM: turn on bforget in journal_forget()!!!
	 */
	for (p = first; p < last; p++) {
		u32 nr = le32_to_cpu(*p);
		if (nr) {
			struct buffer_head *bh;

			*p = 0;
			bh = sb_get_hash_table(inode->i_sb, nr);
			ext3_forget(handle, 0, inode, bh, nr);
		}
	}

	ext3_free_blocks(handle, inode, block_to_free, count);
}
/**
 * ext3_free_data - free a list of data blocks
 * @handle: handle for this transaction
 * @inode: inode we are dealing with
 * @this_bh: indirect buffer_head which contains *@first and *@last
 * @first: array of block numbers
 * @last: points immediately past the end of array
 *
 * We are freeing all blocks referred to from that array (numbers are stored
 * as little-endian 32-bit) and updating @inode->i_blocks appropriately.
 *
 * We accumulate contiguous runs of blocks to free.  Conveniently, if these
 * blocks are contiguous then releasing them at one time will only affect one
 * or two bitmap blocks (+ group descriptor(s) and superblock) and we won't
 * actually use a lot of journal space.
 *
 * @this_bh will be %NULL if @first and @last point into the inode's direct
 * block pointers.
 */
static void ext3_free_data(handle_t *handle, struct inode *inode,
			   struct buffer_head *this_bh, u32 *first, u32 *last)
{
	unsigned long block_to_free = 0;    /* Starting block # of a run */
	unsigned long count = 0;	    /* Number of blocks in the run */
	u32 *block_to_free_p = NULL;	    /* Pointer into inode/ind
					       corresponding to
					       block_to_free */
	unsigned long nr;		    /* Current block # */
	u32 *p;				    /* Pointer into inode/ind
					       for current block */
	int err;

	if (this_bh) {				/* For indirect block */
		BUFFER_TRACE(this_bh, "get_write_access");
		err = ext3_journal_get_write_access(handle, this_bh);
		/* Important: if we can't update the indirect pointers
		 * to the blocks, we can't free them. */
		if (err)
			return;
	}

	for (p = first; p < last; p++) {
		nr = le32_to_cpu(*p);
		if (nr) {
			/* accumulate blocks to free if they're contiguous */
			if (count == 0) {
				block_to_free = nr;
				block_to_free_p = p;
				count = 1;
			} else if (nr == block_to_free + count) {
				count++;
			} else {
				ext3_clear_blocks(handle, inode, this_bh,
						  block_to_free,
						  count, block_to_free_p, p);
				block_to_free = nr;
				block_to_free_p = p;
				count = 1;
			}
		}
	}

	if (count > 0)
		ext3_clear_blocks(handle, inode, this_bh, block_to_free,
				  count, block_to_free_p, p);

	if (this_bh) {
		BUFFER_TRACE(this_bh, "call ext3_journal_dirty_metadata");
		ext3_journal_dirty_metadata(handle, this_bh);
	}
}
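/*
 * Example: if the pointer array holds { 100, 101, 102, 0, 200 }, the
 * run {100,101,102} is flushed as ext3_clear_blocks(..., 100, 3, ...)
 * when 200 breaks the contiguity (the hole itself is simply skipped),
 * and the trailing run is freed by the final call with (200, 1).
 * This is also why (last - first) can exceed `count'.
 */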

/*
 * ext3_truncate()
 *
 * We block out ext3_get_block() block instantiations across the entire
 * transaction, and VFS/VM ensures that ext3_truncate() cannot run
 * simultaneously on behalf of the same inode.
 *
 * As we work through the truncate and commit bits of it to the journal there
 * is one core, guiding principle: the file's tree must always be consistent on
 * disk.  We must be able to restart the truncate after a crash.
 *
 * The file's tree may be transiently inconsistent in memory (although it
 * probably isn't), but whenever we close off and commit a journal transaction,
 * the contents of (the filesystem + the journal) must be consistent and
 * restartable.  It's pretty simple, really: bottom up, right to left (although
 * left-to-right works OK too).
 *
 * Note that at recovery time, journal replay occurs *before* the restart of
 * truncate against the orphan inode list.
 *
 * The committed inode has the new, desired i_size (which is the same as
 * i_disksize in this case).  After a crash, ext3_orphan_cleanup() will see
 * that this inode's truncate did not complete and it will again call
 * ext3_truncate() to have another go.  So there will be instantiated blocks
 * to the right of the truncation point in a crashed ext3 filesystem.  But
 * that's fine - as long as they are linked from the inode, the post-crash
 * ext3_truncate() run will find them and release them.
 */

void ext3_truncate(struct inode * inode)
{
	handle_t *handle;
	u32 *i_data = inode->u.ext3_i.i_data;
	int addr_per_block = EXT3_ADDR_PER_BLOCK(inode->i_sb);
	int offsets[4];
	Indirect chain[4];
	Indirect *partial;
	int nr = 0;
	int n;
	long last_block;
	unsigned blocksize;

	if (!(S_ISREG(inode->i_mode) || S_ISDIR(inode->i_mode) ||
	      S_ISLNK(inode->i_mode)))
		return;
	if (IS_APPEND(inode) || IS_IMMUTABLE(inode))
		return;

	ext3_discard_prealloc(inode);

	handle = start_transaction(inode);
	if (IS_ERR(handle))
		return;		/* AKPM: return what? */

	blocksize = inode->i_sb->s_blocksize;
	last_block = (inode->i_size + blocksize-1)
					>> EXT3_BLOCK_SIZE_BITS(inode->i_sb);

	ext3_block_truncate_page(handle, inode->i_mapping, inode->i_size);

	n = ext3_block_to_path(inode, last_block, offsets);
	if (n == 0)
		goto out_stop;	/* error */

	/*
	 * OK.  This truncate is going to happen.  We add the inode to the
	 * orphan list, so that if this truncate spans multiple transactions,
	 * and we crash, we will resume the truncate when the filesystem
	 * recovers.  It also marks the inode dirty, to catch the new size.
	 *
	 * Implication: the file must always be in a sane, consistent
	 * truncatable state while each transaction commits.
	 */
	if (ext3_orphan_add(handle, inode))
		goto out_stop;

	/*
	 * The orphan list entry will now protect us from any crash which
	 * occurs before the truncate completes, so it is now safe to propagate
	 * the new, shorter inode size (held for now in i_size) into the
	 * on-disk inode.  We do this via i_disksize, which is the value which
	 * ext3 *really* writes onto the disk inode.
	 */
	inode->u.ext3_i.i_disksize = inode->i_size;

	/*
	 * From here we block out all ext3_get_block() callers who want to
	 * modify the block allocation tree.
	 */
	down_write(&inode->u.ext3_i.truncate_sem);

	if (n == 1) {		/* direct blocks */
		ext3_free_data(handle, inode, NULL, i_data+offsets[0],
			       i_data + EXT3_NDIR_BLOCKS);
		goto do_indirects;
	}

	partial = ext3_find_shared(inode, n, offsets, chain, &nr);
	/* Kill the top of shared branch (not detached) */
	if (nr) {
		if (partial == chain) {
			/* Shared branch grows from the inode */
			ext3_free_branches(handle, inode, NULL,
					   &nr, &nr+1, (chain+n-1) - partial);
			*partial->p = 0;
			/*
			 * We mark the inode dirty prior to restart,
			 * and prior to stop.  No need for it here.
			 */
		} else {
			/* Shared branch grows from an indirect block */
			BUFFER_TRACE(partial->bh, "get_write_access");
			ext3_free_branches(handle, inode, partial->bh,
					   partial->p,
					   partial->p+1, (chain+n-1) - partial);
		}
	}
	/* Clear the ends of indirect blocks on the shared branch */
	while (partial > chain) {
		ext3_free_branches(handle, inode, partial->bh, partial->p + 1,
				   (u32*)partial->bh->b_data + addr_per_block,
				   (chain+n-1) - partial);
		BUFFER_TRACE(partial->bh, "call brelse");
		brelse (partial->bh);
		partial--;
	}
do_indirects:
	/* Kill the remaining (whole) subtrees.  Each case deliberately
	 * falls through, so everything to the right of the starting
	 * level is released. */
	switch (offsets[0]) {
		default:
			nr = i_data[EXT3_IND_BLOCK];
			if (nr) {
				ext3_free_branches(handle, inode, NULL,
						   &nr, &nr+1, 1);
				i_data[EXT3_IND_BLOCK] = 0;
			}
			/* fall through */
		case EXT3_IND_BLOCK:
			nr = i_data[EXT3_DIND_BLOCK];
			if (nr) {
				ext3_free_branches(handle, inode, NULL,
						   &nr, &nr+1, 2);
				i_data[EXT3_DIND_BLOCK] = 0;
			}
			/* fall through */
		case EXT3_DIND_BLOCK:
			nr = i_data[EXT3_TIND_BLOCK];
			if (nr) {
				ext3_free_branches(handle, inode, NULL,
						   &nr, &nr+1, 3);
				i_data[EXT3_TIND_BLOCK] = 0;
			}
			/* fall through */
		case EXT3_TIND_BLOCK:
			;
	}
	up_write(&inode->u.ext3_i.truncate_sem);
	inode->i_mtime = inode->i_ctime = CURRENT_TIME;
	ext3_mark_inode_dirty(handle, inode);

	/* In a multi-transaction truncate, we only make the final
	 * transaction synchronous */
	if (IS_SYNC(inode))
		handle->h_sync = 1;
out_stop:
	/*
	 * If this was a simple ftruncate(), and the file will remain alive
	 * then we need to clear up the orphan record which we created above.
	 * However, if this was a real unlink then we were called by
	 * ext3_delete_inode(), and we allow that function to clean up the
	 * orphan info for us.
	 */
	if (inode->i_nlink)
		ext3_orphan_del(handle, inode);

	ext3_journal_stop(handle, inode);
}

/*
 * ext3_get_inode_loc returns with an extra refcount against the
 * inode's underlying buffer_head on success.
 */

int ext3_get_inode_loc (struct inode *inode, struct ext3_iloc *iloc)
{
	struct buffer_head *bh = 0;
	unsigned long block;
	unsigned long block_group;
	unsigned long group_desc;
	unsigned long desc;
	unsigned long offset;
	struct ext3_group_desc * gdp;

	if ((inode->i_ino != EXT3_ROOT_INO &&
	     inode->i_ino != EXT3_ACL_IDX_INO &&
	     inode->i_ino != EXT3_ACL_DATA_INO &&
	     inode->i_ino != EXT3_JOURNAL_INO &&
	     inode->i_ino < EXT3_FIRST_INO(inode->i_sb)) ||
	    inode->i_ino > le32_to_cpu(
			inode->i_sb->u.ext3_sb.s_es->s_inodes_count)) {
		ext3_error (inode->i_sb, "ext3_get_inode_loc",
			    "bad inode number: %lu", inode->i_ino);
		goto bad_inode;
	}
	block_group = (inode->i_ino - 1) / EXT3_INODES_PER_GROUP(inode->i_sb);
	if (block_group >= inode->i_sb->u.ext3_sb.s_groups_count) {
		ext3_error (inode->i_sb, "ext3_get_inode_loc",
			    "group >= groups count");
		goto bad_inode;
	}
	group_desc = block_group >> EXT3_DESC_PER_BLOCK_BITS(inode->i_sb);
	desc = block_group & (EXT3_DESC_PER_BLOCK(inode->i_sb) - 1);
	bh = inode->i_sb->u.ext3_sb.s_group_desc[group_desc];
	if (!bh) {
		ext3_error (inode->i_sb, "ext3_get_inode_loc",
			    "Descriptor not loaded");
		goto bad_inode;
	}

	gdp = (struct ext3_group_desc *) bh->b_data;
	/*
	 * Figure out the offset within the block group inode table
	 */
	offset = ((inode->i_ino - 1) % EXT3_INODES_PER_GROUP(inode->i_sb)) *
		EXT3_INODE_SIZE(inode->i_sb);
	block = le32_to_cpu(gdp[desc].bg_inode_table) +
		(offset >> EXT3_BLOCK_SIZE_BITS(inode->i_sb));
	if (!(bh = sb_bread(inode->i_sb, block))) {
		ext3_error (inode->i_sb, "ext3_get_inode_loc",
			    "unable to read inode block - "
			    "inode=%lu, block=%lu", inode->i_ino, block);
		goto bad_inode;
	}
	offset &= (EXT3_BLOCK_SIZE(inode->i_sb) - 1);

	iloc->bh = bh;
	iloc->raw_inode = (struct ext3_inode *) (bh->b_data + offset);
	iloc->block_group = block_group;

	return 0;

 bad_inode:
	return -EIO;
}

void ext3_read_inode(struct inode * inode)
{
	struct ext3_iloc iloc;
	struct ext3_inode *raw_inode;
	struct buffer_head *bh;
	int block;

	if (ext3_get_inode_loc(inode, &iloc))
		goto bad_inode;
	bh = iloc.bh;
	raw_inode = iloc.raw_inode;
	init_rwsem(&inode->u.ext3_i.truncate_sem);
	inode->i_mode = le16_to_cpu(raw_inode->i_mode);
	inode->i_uid = (uid_t)le16_to_cpu(raw_inode->i_uid_low);
	inode->i_gid = (gid_t)le16_to_cpu(raw_inode->i_gid_low);
	if (!(test_opt (inode->i_sb, NO_UID32))) {
		inode->i_uid |= le16_to_cpu(raw_inode->i_uid_high) << 16;
		inode->i_gid |= le16_to_cpu(raw_inode->i_gid_high) << 16;
	}
	inode->i_nlink = le16_to_cpu(raw_inode->i_links_count);
	inode->i_size = le32_to_cpu(raw_inode->i_size);
	inode->i_atime = le32_to_cpu(raw_inode->i_atime);
	inode->i_ctime = le32_to_cpu(raw_inode->i_ctime);
	inode->i_mtime = le32_to_cpu(raw_inode->i_mtime);
	inode->u.ext3_i.i_dtime = le32_to_cpu(raw_inode->i_dtime);
	/* We now have enough fields to check if the inode was active or not.
	 * This is needed because nfsd might try to access dead inodes.
	 * The test is the same one that e2fsck uses.
	 * NeilBrown 1999oct15
	 */
	if (inode->i_nlink == 0) {
		if (inode->i_mode == 0 ||
		    !(inode->i_sb->u.ext3_sb.s_mount_state & EXT3_ORPHAN_FS)) {
			/* this inode is deleted */
			brelse (bh);
			goto bad_inode;
		}
		/* The only unlinked inodes we let through here have
		 * valid i_mode and are being read by the orphan
		 * recovery code: that's fine, we're about to complete
		 * the process of deleting those. */
	}
	inode->i_blksize = PAGE_SIZE;	/* This is the optimal IO size
					 * (for stat), not the fs block
					 * size */
	inode->i_blocks = le32_to_cpu(raw_inode->i_blocks);
	inode->i_version = ++event;
	inode->u.ext3_i.i_flags = le32_to_cpu(raw_inode->i_flags);
#ifdef EXT3_FRAGMENTS
	inode->u.ext3_i.i_faddr = le32_to_cpu(raw_inode->i_faddr);
	inode->u.ext3_i.i_frag_no = raw_inode->i_frag;
	inode->u.ext3_i.i_frag_size = raw_inode->i_fsize;
#endif
	inode->u.ext3_i.i_file_acl = le32_to_cpu(raw_inode->i_file_acl);
	if (!S_ISREG(inode->i_mode)) {
		inode->u.ext3_i.i_dir_acl = le32_to_cpu(raw_inode->i_dir_acl);
	} else {
		inode->i_size |=
			((__u64)le32_to_cpu(raw_inode->i_size_high)) << 32;
	}
	inode->u.ext3_i.i_disksize = inode->i_size;
	inode->i_generation = le32_to_cpu(raw_inode->i_generation);
#ifdef EXT3_PREALLOCATE
	inode->u.ext3_i.i_prealloc_count = 0;
#endif
	inode->u.ext3_i.i_block_group = iloc.block_group;

	/*
	 * NOTE! The in-memory inode i_data array is in little-endian order
	 * even on big-endian machines: we do NOT byteswap the block numbers!
	 */
	for (block = 0; block < EXT3_N_BLOCKS; block++)
		inode->u.ext3_i.i_data[block] = iloc.raw_inode->i_block[block];
	INIT_LIST_HEAD(&inode->u.ext3_i.i_orphan);

	brelse (iloc.bh);

	if (inode->i_ino == EXT3_ACL_IDX_INO ||
	    inode->i_ino == EXT3_ACL_DATA_INO)
		/* Nothing to do */ ;
	else if (S_ISREG(inode->i_mode)) {
		inode->i_op = &ext3_file_inode_operations;
		inode->i_fop = &ext3_file_operations;
		inode->i_mapping->a_ops = &ext3_aops;
	} else if (S_ISDIR(inode->i_mode)) {
		inode->i_op = &ext3_dir_inode_operations;
		inode->i_fop = &ext3_dir_operations;
	} else if (S_ISLNK(inode->i_mode)) {
		if (!inode->i_blocks)
			inode->i_op = &ext3_fast_symlink_inode_operations;
		else {
			inode->i_op = &page_symlink_inode_operations;
			inode->i_mapping->a_ops = &ext3_aops;
		}
	} else
		init_special_inode(inode, inode->i_mode,
				   le32_to_cpu(iloc.raw_inode->i_block[0]));
	/* inode->i_attr_flags = 0;				unused */
	if (inode->u.ext3_i.i_flags & EXT3_SYNC_FL) {
		/* inode->i_attr_flags |= ATTR_FLAG_SYNCRONOUS;	unused */
		inode->i_flags |= S_SYNC;
	}
	if (inode->u.ext3_i.i_flags & EXT3_APPEND_FL) {
		/* inode->i_attr_flags |= ATTR_FLAG_APPEND;	unused */
		inode->i_flags |= S_APPEND;
	}
	if (inode->u.ext3_i.i_flags & EXT3_IMMUTABLE_FL) {
		/* inode->i_attr_flags |= ATTR_FLAG_IMMUTABLE;	unused */
		inode->i_flags |= S_IMMUTABLE;
	}
	if (inode->u.ext3_i.i_flags & EXT3_NOATIME_FL) {
		/* inode->i_attr_flags |= ATTR_FLAG_NOATIME;	unused */
		inode->i_flags |= S_NOATIME;
	}
	return;

bad_inode:
	make_bad_inode(inode);
	return;
}
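
/*
 * Worked example for the arithmetic in ext3_get_inode_loc() (all
 * numbers invented for illustration): with 1k blocks, 128-byte inodes
 * and 2048 inodes per group, inode 5000 gives
 *
 *	block_group = 4999 / 2048         = 2
 *	offset      = (4999 % 2048) * 128 = 903 * 128 = 115584
 *	block       = bg_inode_table + (115584 >> 10) = table block 112
 *	offset     &= 1023                = 896
 *
 * i.e. the raw inode lives 896 bytes into the 113th block of group 2's
 * inode table.
 */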

/*
 * Post the struct inode info into an on-disk inode location in the
 * buffer-cache.  This gobbles the caller's reference to the
 * buffer_head in the inode location struct.
 */

static int ext3_do_update_inode(handle_t *handle,
				struct inode *inode,
				struct ext3_iloc *iloc)
{
	struct ext3_inode *raw_inode = iloc->raw_inode;
	struct buffer_head *bh = iloc->bh;
	int err = 0, rc, block;

	if (handle) {
		BUFFER_TRACE(bh, "get_write_access");
		err = ext3_journal_get_write_access(handle, bh);
		if (err)
			goto out_brelse;
	}
	raw_inode->i_mode = cpu_to_le16(inode->i_mode);
	if (!(test_opt(inode->i_sb, NO_UID32))) {
		raw_inode->i_uid_low = cpu_to_le16(low_16_bits(inode->i_uid));
		raw_inode->i_gid_low = cpu_to_le16(low_16_bits(inode->i_gid));
/*
 * Fix up interoperability with old kernels.  Otherwise, old inodes get
 * re-used with the upper 16 bits of the uid/gid intact.
 */
		if (!inode->u.ext3_i.i_dtime) {
			raw_inode->i_uid_high =
				cpu_to_le16(high_16_bits(inode->i_uid));
			raw_inode->i_gid_high =
				cpu_to_le16(high_16_bits(inode->i_gid));
		} else {
			raw_inode->i_uid_high = 0;
			raw_inode->i_gid_high = 0;
		}
	} else {
		raw_inode->i_uid_low =
			cpu_to_le16(fs_high2lowuid(inode->i_uid));
		raw_inode->i_gid_low =
			cpu_to_le16(fs_high2lowgid(inode->i_gid));
		raw_inode->i_uid_high = 0;
		raw_inode->i_gid_high = 0;
	}
	raw_inode->i_links_count = cpu_to_le16(inode->i_nlink);
	raw_inode->i_size = cpu_to_le32(inode->u.ext3_i.i_disksize);
	raw_inode->i_atime = cpu_to_le32(inode->i_atime);
	raw_inode->i_ctime = cpu_to_le32(inode->i_ctime);
	raw_inode->i_mtime = cpu_to_le32(inode->i_mtime);
	raw_inode->i_blocks = cpu_to_le32(inode->i_blocks);
	raw_inode->i_dtime = cpu_to_le32(inode->u.ext3_i.i_dtime);
	raw_inode->i_flags = cpu_to_le32(inode->u.ext3_i.i_flags);
#ifdef EXT3_FRAGMENTS
	raw_inode->i_faddr = cpu_to_le32(inode->u.ext3_i.i_faddr);
	raw_inode->i_frag = inode->u.ext3_i.i_frag_no;
	raw_inode->i_fsize = inode->u.ext3_i.i_frag_size;
#else
	/* If we are not tracking these fields in the in-memory inode,
	 * then preserve them on disk, but still initialise them to zero
	 * for new inodes. */
	if (EXT3_I(inode)->i_state & EXT3_STATE_NEW) {
		raw_inode->i_faddr = 0;
		raw_inode->i_frag = 0;
		raw_inode->i_fsize = 0;
	}
#endif
	raw_inode->i_file_acl = cpu_to_le32(inode->u.ext3_i.i_file_acl);
	if (!S_ISREG(inode->i_mode)) {
		raw_inode->i_dir_acl = cpu_to_le32(inode->u.ext3_i.i_dir_acl);
	} else {
		raw_inode->i_size_high =
			cpu_to_le32(inode->u.ext3_i.i_disksize >> 32);
		if (inode->u.ext3_i.i_disksize > 0x7fffffffULL) {
			struct super_block *sb = inode->i_sb;
			if (!EXT3_HAS_RO_COMPAT_FEATURE(sb,
					EXT3_FEATURE_RO_COMPAT_LARGE_FILE) ||
			    EXT3_SB(sb)->s_es->s_rev_level ==
					cpu_to_le32(EXT3_GOOD_OLD_REV)) {
				/* If this is the first large file
				 * created, add a flag to the superblock.
				 */
				err = ext3_journal_get_write_access(handle,
						sb->u.ext3_sb.s_sbh);
				if (err)
					goto out_brelse;
				ext3_update_dynamic_rev(sb);
				EXT3_SET_RO_COMPAT_FEATURE(sb,
					EXT3_FEATURE_RO_COMPAT_LARGE_FILE);
				sb->s_dirt = 1;
				handle->h_sync = 1;
				err = ext3_journal_dirty_metadata(handle,
						sb->u.ext3_sb.s_sbh);
			}
		}
	}
	raw_inode->i_generation = cpu_to_le32(inode->i_generation);
	if (S_ISCHR(inode->i_mode) || S_ISBLK(inode->i_mode))
		raw_inode->i_block[0] =
			cpu_to_le32(kdev_t_to_nr(inode->i_rdev));
	else for (block = 0; block < EXT3_N_BLOCKS; block++)
		raw_inode->i_block[block] = inode->u.ext3_i.i_data[block];

	BUFFER_TRACE(bh, "call ext3_journal_dirty_metadata");
	rc = ext3_journal_dirty_metadata(handle, bh);
	if (!err)
		err = rc;
	EXT3_I(inode)->i_state &= ~EXT3_STATE_NEW;

out_brelse:
	brelse (bh);
	ext3_std_error(inode->i_sb, err);
	return err;
}

/*
 * ext3_write_inode()
 *
 * We are called from a few places:
 *
 * - Within generic_file_write() for O_SYNC files.
 *   Here, there will be no transaction running.  We wait for any running
 *   transaction to commit.
 *
 * - Within sys_sync(), kupdate and such.
 *   We wait on commit, if told to.
 *
 * - Within prune_icache() (PF_MEMALLOC == true)
 *   Here we simply return.  We can't afford to block kswapd on the
 *   journal commit.
 *
 * In all cases it is actually safe for us to return without doing anything,
 * because the inode has been copied into a raw inode buffer in
 * ext3_mark_inode_dirty().  This is a correctness thing for O_SYNC and for
 * knfsd.
 *
 * Note that we are absolutely dependent upon all inode dirtiers doing the
 * right thing: they *must* call mark_inode_dirty() after dirtying info in
 * which we are interested.
 *
 * It would be a bug for them to not do this.  The code:
 *
 *	mark_inode_dirty(inode)
 *	stuff();
 *	inode->i_size = expr;
 *
 * is in error because a kswapd-driven write_inode() could occur while
 * `stuff()' is running, and the new i_size will be lost.  Plus the inode
 * will no longer be on the superblock's dirty inode list.
 */
void ext3_write_inode(struct inode *inode, int wait)
{
	if (current->flags & PF_MEMALLOC)
		return;

	if (ext3_journal_current_handle()) {
		jbd_debug(0, "called recursively, non-PF_MEMALLOC!\n");
		return;
	}

	if (!wait)
		return;

	ext3_force_commit(inode->i_sb);
}

/*
 * ext3_setattr()
 *
 * Called from notify_change.
 *
 * We want to trap VFS attempts to truncate the file as soon as
 * possible.  In particular, we want to make sure that when the VFS
 * shrinks i_size, we put the inode on the orphan list and modify
 * i_disksize immediately, so that during the subsequent flushing of
 * dirty pages and freeing of disk blocks, we can guarantee that any
 * commit will leave the blocks being flushed in an unused state on
 * disk.  (On recovery, the inode will get truncated and the blocks will
 * be freed, so we have a strong guarantee that no future commit will
 * leave these blocks visible to the user.)
 *
 * This is only needed for regular files.  rmdir() has its own path, and
 * we can never truncate a directory except on final unlink (at which
 * point i_nlink is zero so recovery is easy.)
 *
 * Called with the BKL.
 */

int ext3_setattr(struct dentry *dentry, struct iattr *attr)
{
	struct inode *inode = dentry->d_inode;
	int error, rc = 0;
	const unsigned int ia_valid = attr->ia_valid;

	error = inode_change_ok(inode, attr);
	if (error)
		return error;

	if ((ia_valid & ATTR_UID && attr->ia_uid != inode->i_uid) ||
	    (ia_valid & ATTR_GID && attr->ia_gid != inode->i_gid)) {
		error = DQUOT_TRANSFER(inode, attr) ? -EDQUOT : 0;
		if (error)
			return error;
	}

	if (attr->ia_valid & ATTR_SIZE && attr->ia_size < inode->i_size) {
		handle_t *handle;

		handle = ext3_journal_start(inode, 3);
		if (IS_ERR(handle)) {
			error = PTR_ERR(handle);
			goto err_out;
		}

		error = ext3_orphan_add(handle, inode);
		inode->u.ext3_i.i_disksize = attr->ia_size;
		rc = ext3_mark_inode_dirty(handle, inode);
		if (!error)
			error = rc;
		ext3_journal_stop(handle, inode);
	}

	rc = inode_setattr(inode, attr);

	/* If inode_setattr's call to ext3_truncate failed to get a
	 * transaction handle at all, we need to clean up the in-core
	 * orphan list manually. */
	if (inode->i_nlink)
		ext3_orphan_del(NULL, inode);

err_out:
	ext3_std_error(inode->i_sb, error);
	if (!error)
		error = rc;
	return error;
}


/*
 * akpm: how many blocks doth make a writepage()?
 *
 * With N blocks per page, it may be:
 * N data blocks
 * 2 indirect blocks
 * 2 dindirect blocks
 * 1 tindirect block
 * N+5 bitmap blocks (from the above)
 * N+5 group descriptor summary blocks
 * 1 inode block
 * 1 superblock
 * 2 * EXT3_SINGLEDATA_TRANS_BLOCKS for the quota files
 *
 * 3 * (N + 5) + 2 + 2 * EXT3_SINGLEDATA_TRANS_BLOCKS
 *
 * With ordered or writeback data it's the same, less the N data blocks.
 *
 * If the inode's direct blocks can hold an integral number of pages then a
 * page cannot straddle two indirect blocks, and we can only touch one indirect
 * and dindirect block, and the "5" above becomes "3".
 *
 * This still overestimates under most circumstances.  If we were to pass the
 * start and end offsets in here as well we could do block_to_path() on each
 * block and work out the exact number of indirects which are touched.  Pah.
 */

int ext3_writepage_trans_blocks(struct inode *inode)
{
	int bpp = ext3_journal_blocks_per_page(inode);
	int indirects = (EXT3_NDIR_BLOCKS % bpp) ? 5 : 3;
	int ret;

	if (ext3_should_journal_data(inode))
		ret = 3 * (bpp + indirects) + 2;
	else
		ret = 2 * (bpp + indirects) + 2;

#ifdef CONFIG_QUOTA
	ret += 2 * EXT3_SINGLEDATA_TRANS_BLOCKS;
#endif

	return ret;
}

int
ext3_mark_iloc_dirty(handle_t *handle,
		     struct inode *inode,
		     struct ext3_iloc *iloc)
{
	int err = 0;

	if (handle) {
		/* the do_update_inode consumes one bh->b_count */
		atomic_inc(&iloc->bh->b_count);
		err = ext3_do_update_inode(handle, inode, iloc);
		/* ext3_do_update_inode() does journal_dirty_metadata */
		brelse(iloc->bh);
	} else {
		printk(KERN_EMERG "%s: called with no handle!\n",
		       __FUNCTION__);
	}
	return err;
}

/*
 * On success, we end up with an outstanding reference count against
 * iloc->bh.  This _must_ be cleaned up later.
 */

int
ext3_reserve_inode_write(handle_t *handle, struct inode *inode,
			 struct ext3_iloc *iloc)
{
	int err = 0;
	if (handle) {
		err = ext3_get_inode_loc(inode, iloc);
		if (!err) {
			BUFFER_TRACE(iloc->bh, "get_write_access");
			err = ext3_journal_get_write_access(handle, iloc->bh);
			if (err) {
				brelse(iloc->bh);
				iloc->bh = NULL;
			}
		}
	}
	ext3_std_error(inode->i_sb, err);
	return err;
}

/*
 * akpm: What we do here is to mark the in-core inode as clean
 * with respect to inode dirtiness (it may still be data-dirty).
 * This means that the in-core inode may be reaped by prune_icache
 * without having to perform any I/O.  This is a very good thing,
 * because *any* task may call prune_icache - even ones which
 * have a transaction open against a different journal.
 *
 * Is this cheating?  Not really.  Sure, we haven't written the
 * inode out, but prune_icache isn't a user-visible syncing function.
 * Whenever the user wants stuff synced (sys_sync, sys_msync, sys_fsync)
 * we start and wait on commits.
 *
 * Is this efficient/effective?  Well, we're being nice to the system
 * by cleaning up our inodes proactively so they can be reaped
 * without I/O.  But we are potentially leaving up to five seconds'
 * worth of inodes floating about which prune_icache wants us to
 * write out.  One way to fix that would be to get prune_icache()
 * to do a write_super() to free up some memory.  It has the desired
 * effect.
 */
int ext3_mark_inode_dirty(handle_t *handle, struct inode *inode)
{
	struct ext3_iloc iloc;
	int err;

	err = ext3_reserve_inode_write(handle, inode, &iloc);
	if (!err)
		err = ext3_mark_iloc_dirty(handle, inode, &iloc);
	return err;
}

/*
 * akpm: ext3_dirty_inode() is called from __mark_inode_dirty()
 *
 * We're really interested in the case where a file is being extended.
 * i_size has been changed by generic_commit_write() and we thus need
 * to include the updated inode in the current transaction.
 *
 * Also, DQUOT_ALLOC_SPACE() will always dirty the inode when blocks
 * are allocated to the file.
 *
 * If the inode is marked synchronous, we don't honour that here - doing
 * so would cause a commit on atime updates, which we don't bother doing.
 * We handle synchronous inodes at the highest possible level.
 */
void ext3_dirty_inode(struct inode *inode)
{
	handle_t *current_handle = ext3_journal_current_handle();
	handle_t *handle;

	lock_kernel();
	handle = ext3_journal_start(inode, 1);
	if (IS_ERR(handle))
		goto out;
	if (current_handle &&
	    current_handle->h_transaction != handle->h_transaction) {
		/* This task has a transaction open against a different fs */
		printk(KERN_EMERG "%s: transactions do not match!\n",
		       __FUNCTION__);
	} else {
		jbd_debug(5, "marking dirty.  outer handle=%p\n",
			  current_handle);
		ext3_mark_inode_dirty(handle, inode);
	}
	ext3_journal_stop(handle, inode);
out:
	unlock_kernel();
}

#ifdef AKPM
/*
 * Bind an inode's backing buffer_head into this transaction, to prevent
 * it from being flushed to disk early.  Unlike
 * ext3_reserve_inode_write, this leaves behind no bh reference and
 * returns no iloc structure, so the caller needs to repeat the iloc
 * lookup to mark the inode dirty later.
 */
static inline int
ext3_pin_inode(handle_t *handle, struct inode *inode)
{
	struct ext3_iloc iloc;

	int err = 0;
	if (handle) {
		err = ext3_get_inode_loc(inode, &iloc);
		if (!err) {
			BUFFER_TRACE(iloc.bh, "get_write_access");
			err = journal_get_write_access(handle, iloc.bh);
			if (!err)
				err = ext3_journal_dirty_metadata(handle,
								  iloc.bh);
			brelse(iloc.bh);
		}
	}
	ext3_std_error(inode->i_sb, err);
	return err;
}
#endif

int ext3_change_inode_journal_flag(struct inode *inode, int val)
{
	journal_t *journal;
	handle_t *handle;
	int err;

	/*
	 * We have to be very careful here: changing a data block's
	 * journaling status dynamically is dangerous.  If we write a
	 * data block to the journal, change the status and then delete
	 * that block, we risk forgetting to revoke the old log record
	 * from the journal and so a subsequent replay can corrupt data.
	 * So, first we make sure that the journal is empty and that
	 * nobody is changing anything.
	 */

	journal = EXT3_JOURNAL(inode);
	if (is_journal_aborted(journal) || IS_RDONLY(inode))
		return -EROFS;

	journal_lock_updates(journal);
	journal_flush(journal);

	/*
	 * OK, there are no updates running now, and all cached data is
	 * synced to disk.  We are now in a completely consistent state
	 * which doesn't have anything in the journal, and we know that
	 * no filesystem updates are running, so it is safe to modify
	 * the inode's in-core data-journaling state flag now.
	 */

	if (val)
		inode->u.ext3_i.i_flags |= EXT3_JOURNAL_DATA_FL;
	else
		inode->u.ext3_i.i_flags &= ~EXT3_JOURNAL_DATA_FL;

	journal_unlock_updates(journal);

	/* Finally we can mark the inode as dirty. */

	handle = ext3_journal_start(inode, 1);
	if (IS_ERR(handle))
		return PTR_ERR(handle);

	err = ext3_mark_inode_dirty(handle, inode);
	handle->h_sync = 1;
	ext3_journal_stop(handle, inode);
	ext3_std_error(inode->i_sb, err);

	return err;
}


/*
 * ext3_aops_journal_start().
 *
 * <This function died, but the comment lives on>
 *
 * We need to take the inode semaphore *outside* the
 * journal_start/journal_stop.  Otherwise, a different task could do a
 * wait_for_commit() while holding ->i_sem, which deadlocks.  The rule
 * is: transaction open/closes are considered to be a locking operation
 * and they nest *inside* ->i_sem.
 * ----------------------------------------------------------------------------
 * Possible problem:
 *	ext3_file_write()
 *	-> generic_file_write()
 *	   -> __alloc_pages()
 *	      -> page_launder()
 *		 -> ext3_writepage()
 *
 * And the writepage can be on a different fs while we have a
 * transaction open against this one!  Bad.
 *
 * I tried making the task PF_MEMALLOC here, but that simply results in
 * 0-order allocation failures passed back to generic_file_write().
 * Instead, we rely on the reentrancy protection in ext3_writepage().
 * ----------------------------------------------------------------------------
 * When we do the journal_start() here we don't really need to reserve
 * any blocks - we won't need any until we hit ext3_prepare_write(),
 * which does all the needed journal extending.  However!  There is a
 * problem with quotas:
 *
 * Thread 1:
 * sys_sync
 * ->sync_dquots
 *   ->commit_dquot
 *     ->lock_dquot
 *     ->write_dquot
 *       ->ext3_file_write
 *         ->journal_start
 *         ->ext3_prepare_write
 *           ->journal_extend
 *           ->journal_start
 * Thread 2:
 * ext3_create		(for example)
 * ->ext3_new_inode
 *   ->dquot_initialize
 *     ->lock_dquot
 *
 * Deadlock.  Thread 1's journal_start blocks because thread 2 has a
 * transaction open.  Thread 2's transaction will never close because
 * thread 2 is stuck waiting for the dquot lock.
 *
 * So.  We must ensure that thread 1 *never* needs to extend the journal
 * for quota writes.  We do that by reserving enough journal blocks
 * here, in ext3_aops_journal_start(), to ensure that the forthcoming
 * "see if we need to extend" test in ext3_prepare_write() succeeds.
 */