1/* 2 * Copyright (c) 2000-2005 Silicon Graphics, Inc. 3 * All Rights Reserved. 4 * 5 * This program is free software; you can redistribute it and/or 6 * modify it under the terms of the GNU General Public License as 7 * published by the Free Software Foundation. 8 * 9 * This program is distributed in the hope that it would be useful, 10 * but WITHOUT ANY WARRANTY; without even the implied warranty of 11 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 12 * GNU General Public License for more details. 13 * 14 * You should have received a copy of the GNU General Public License 15 * along with this program; if not, write the Free Software Foundation, 16 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA 17 */ 18#include "xfs.h" 19#include "xfs_bit.h" 20#include "xfs_log.h" 21#include "xfs_inum.h" 22#include "xfs_sb.h" 23#include "xfs_ag.h" 24#include "xfs_trans.h" 25#include "xfs_mount.h" 26#include "xfs_bmap_btree.h" 27#include "xfs_dinode.h" 28#include "xfs_inode.h" 29#include "xfs_alloc.h" 30#include "xfs_error.h" 31#include "xfs_rw.h" 32#include "xfs_iomap.h" 33#include "xfs_vnodeops.h" 34#include "xfs_trace.h" 35#include "xfs_bmap.h" 36#include <linux/gfp.h> 37#include <linux/mpage.h> 38#include <linux/pagevec.h> 39#include <linux/writeback.h> 40 41/* 42 * Types of I/O for bmap clustering and I/O completion tracking. 43 */ 44enum { 45 IO_READ, /* mapping for a read */ 46 IO_DELAY, /* mapping covers delalloc region */ 47 IO_UNWRITTEN, /* mapping covers allocated but uninitialized data */ 48 IO_NEW /* just allocated */ 49}; 50 51/* 52 * Prime number of hash buckets since address is used as the key. 53 */ 54#define NVSYNC 37 55#define to_ioend_wq(v) (&xfs_ioend_wq[((unsigned long)v) % NVSYNC]) 56static wait_queue_head_t xfs_ioend_wq[NVSYNC]; 57 58void __init 59xfs_ioend_init(void) 60{ 61 int i; 62 63 for (i = 0; i < NVSYNC; i++) 64 init_waitqueue_head(&xfs_ioend_wq[i]); 65} 66 67void 68xfs_ioend_wait( 69 xfs_inode_t *ip) 70{ 71 wait_queue_head_t *wq = to_ioend_wq(ip); 72 73 wait_event(*wq, (atomic_read(&ip->i_iocount) == 0)); 74} 75 76STATIC void 77xfs_ioend_wake( 78 xfs_inode_t *ip) 79{ 80 if (atomic_dec_and_test(&ip->i_iocount)) 81 wake_up(to_ioend_wq(ip)); 82} 83 84void 85xfs_count_page_state( 86 struct page *page, 87 int *delalloc, 88 int *unwritten) 89{ 90 struct buffer_head *bh, *head; 91 92 *delalloc = *unwritten = 0; 93 94 bh = head = page_buffers(page); 95 do { 96 if (buffer_unwritten(bh)) 97 (*unwritten) = 1; 98 else if (buffer_delay(bh)) 99 (*delalloc) = 1; 100 } while ((bh = bh->b_this_page) != head); 101} 102 103STATIC struct block_device * 104xfs_find_bdev_for_inode( 105 struct inode *inode) 106{ 107 struct xfs_inode *ip = XFS_I(inode); 108 struct xfs_mount *mp = ip->i_mount; 109 110 if (XFS_IS_REALTIME_INODE(ip)) 111 return mp->m_rtdev_targp->bt_bdev; 112 else 113 return mp->m_ddev_targp->bt_bdev; 114} 115 116/* 117 * We're now finished for good with this ioend structure. 118 * Update the page state via the associated buffer_heads, 119 * release holds on the inode and bio, and finally free 120 * up memory. Do not use the ioend after this. 121 */ 122STATIC void 123xfs_destroy_ioend( 124 xfs_ioend_t *ioend) 125{ 126 struct buffer_head *bh, *next; 127 struct xfs_inode *ip = XFS_I(ioend->io_inode); 128 129 for (bh = ioend->io_buffer_head; bh; bh = next) { 130 next = bh->b_private; 131 bh->b_end_io(bh, !ioend->io_error); 132 } 133 134 /* 135 * Volume managers supporting multiple paths can send back ENODEV 136 * when the final path disappears. 
In this case continuing to fill
	 * the page cache with dirty data which cannot be written out is
	 * evil, so prevent that.
	 */
	if (unlikely(ioend->io_error == -ENODEV)) {
		xfs_do_force_shutdown(ip->i_mount, SHUTDOWN_DEVICE_REQ,
				      __FILE__, __LINE__);
	}

	xfs_ioend_wake(ip);
	mempool_free(ioend, xfs_ioend_pool);
}

/*
 * If the end of the current ioend is beyond the current EOF,
 * return the new EOF value, otherwise zero.
 */
STATIC xfs_fsize_t
xfs_ioend_new_eof(
	xfs_ioend_t		*ioend)
{
	xfs_inode_t		*ip = XFS_I(ioend->io_inode);
	xfs_fsize_t		isize;
	xfs_fsize_t		bsize;

	bsize = ioend->io_offset + ioend->io_size;
	isize = MAX(ip->i_size, ip->i_new_size);
	isize = MIN(isize, bsize);
	return isize > ip->i_d.di_size ? isize : 0;
}

/*
 * Update on-disk file size now that data has been written to disk. The
 * current in-memory file size is i_size. If a write is beyond eof i_new_size
 * will be the intended file size until i_size is updated. If this write does
 * not extend all the way to the valid file size then restrict this update to
 * the end of the write.
 *
 * This function does not block as blocking on the inode lock in IO completion
 * can lead to IO completion order dependency deadlocks. If it can't get the
 * inode ilock it will return EAGAIN. Callers must handle this.
 */
STATIC int
xfs_setfilesize(
	xfs_ioend_t		*ioend)
{
	xfs_inode_t		*ip = XFS_I(ioend->io_inode);
	xfs_fsize_t		isize;

	ASSERT((ip->i_d.di_mode & S_IFMT) == S_IFREG);
	ASSERT(ioend->io_type != IO_READ);

	if (unlikely(ioend->io_error))
		return 0;

	if (!xfs_ilock_nowait(ip, XFS_ILOCK_EXCL))
		return EAGAIN;

	isize = xfs_ioend_new_eof(ioend);
	if (isize) {
		ip->i_d.di_size = isize;
		xfs_mark_inode_dirty(ip);
	}

	xfs_iunlock(ip, XFS_ILOCK_EXCL);
	return 0;
}

/*
 * Schedule IO completion handling on the final put of an ioend.
 */
STATIC void
xfs_finish_ioend(
	struct xfs_ioend	*ioend)
{
	if (atomic_dec_and_test(&ioend->io_remaining)) {
		if (ioend->io_type == IO_UNWRITTEN)
			queue_work(xfsconvertd_workqueue, &ioend->io_work);
		else
			queue_work(xfsdatad_workqueue, &ioend->io_work);
	}
}

/*
 * IO write completion.
 */
STATIC void
xfs_end_io(
	struct work_struct	*work)
{
	xfs_ioend_t		*ioend = container_of(work, xfs_ioend_t, io_work);
	struct xfs_inode	*ip = XFS_I(ioend->io_inode);
	int			error = 0;

	/*
	 * For unwritten extents we need to issue transactions to convert a
	 * range to normal written extents after the data I/O has finished.
	 */
	if (ioend->io_type == IO_UNWRITTEN &&
	    likely(!ioend->io_error && !XFS_FORCED_SHUTDOWN(ip->i_mount))) {

		error = xfs_iomap_write_unwritten(ip, ioend->io_offset,
						  ioend->io_size);
		if (error)
			ioend->io_error = error;
	}

	/*
	 * We might have to update the on-disk file size after extending
	 * writes.
	 */
	if (ioend->io_type != IO_READ) {
		error = xfs_setfilesize(ioend);
		ASSERT(!error || error == EAGAIN);
	}

	/*
	 * If we didn't complete processing of the ioend, requeue it to the
	 * tail of the workqueue for another attempt later. Otherwise destroy
	 * it.
256 */ 257 if (error == EAGAIN) { 258 atomic_inc(&ioend->io_remaining); 259 xfs_finish_ioend(ioend); 260 /* ensure we don't spin on blocked ioends */ 261 delay(1); 262 } else { 263 if (ioend->io_iocb) 264 aio_complete(ioend->io_iocb, ioend->io_result, 0); 265 xfs_destroy_ioend(ioend); 266 } 267} 268 269/* 270 * Call IO completion handling in caller context on the final put of an ioend. 271 */ 272STATIC void 273xfs_finish_ioend_sync( 274 struct xfs_ioend *ioend) 275{ 276 if (atomic_dec_and_test(&ioend->io_remaining)) 277 xfs_end_io(&ioend->io_work); 278} 279 280/* 281 * Allocate and initialise an IO completion structure. 282 * We need to track unwritten extent write completion here initially. 283 * We'll need to extend this for updating the ondisk inode size later 284 * (vs. incore size). 285 */ 286STATIC xfs_ioend_t * 287xfs_alloc_ioend( 288 struct inode *inode, 289 unsigned int type) 290{ 291 xfs_ioend_t *ioend; 292 293 ioend = mempool_alloc(xfs_ioend_pool, GFP_NOFS); 294 295 /* 296 * Set the count to 1 initially, which will prevent an I/O 297 * completion callback from happening before we have started 298 * all the I/O from calling the completion routine too early. 299 */ 300 atomic_set(&ioend->io_remaining, 1); 301 ioend->io_error = 0; 302 ioend->io_list = NULL; 303 ioend->io_type = type; 304 ioend->io_inode = inode; 305 ioend->io_buffer_head = NULL; 306 ioend->io_buffer_tail = NULL; 307 atomic_inc(&XFS_I(ioend->io_inode)->i_iocount); 308 ioend->io_offset = 0; 309 ioend->io_size = 0; 310 ioend->io_iocb = NULL; 311 ioend->io_result = 0; 312 313 INIT_WORK(&ioend->io_work, xfs_end_io); 314 return ioend; 315} 316 317STATIC int 318xfs_map_blocks( 319 struct inode *inode, 320 loff_t offset, 321 ssize_t count, 322 struct xfs_bmbt_irec *imap, 323 int flags) 324{ 325 int nmaps = 1; 326 int new = 0; 327 328 return -xfs_iomap(XFS_I(inode), offset, count, flags, imap, &nmaps, &new); 329} 330 331STATIC int 332xfs_imap_valid( 333 struct inode *inode, 334 struct xfs_bmbt_irec *imap, 335 xfs_off_t offset) 336{ 337 offset >>= inode->i_blkbits; 338 339 return offset >= imap->br_startoff && 340 offset < imap->br_startoff + imap->br_blockcount; 341} 342 343/* 344 * BIO completion handler for buffered IO. 345 */ 346STATIC void 347xfs_end_bio( 348 struct bio *bio, 349 int error) 350{ 351 xfs_ioend_t *ioend = bio->bi_private; 352 353 ASSERT(atomic_read(&bio->bi_cnt) >= 1); 354 ioend->io_error = test_bit(BIO_UPTODATE, &bio->bi_flags) ? 0 : error; 355 356 /* Toss bio and pass work off to an xfsdatad thread */ 357 bio->bi_private = NULL; 358 bio->bi_end_io = NULL; 359 bio_put(bio); 360 361 xfs_finish_ioend(ioend); 362} 363 364STATIC void 365xfs_submit_ioend_bio( 366 struct writeback_control *wbc, 367 xfs_ioend_t *ioend, 368 struct bio *bio) 369{ 370 atomic_inc(&ioend->io_remaining); 371 bio->bi_private = ioend; 372 bio->bi_end_io = xfs_end_bio; 373 374 /* 375 * If the I/O is beyond EOF we mark the inode dirty immediately 376 * but don't update the inode size until I/O completion. 377 */ 378 if (xfs_ioend_new_eof(ioend)) 379 xfs_mark_inode_dirty(XFS_I(ioend->io_inode)); 380 381 submit_bio(wbc->sync_mode == WB_SYNC_ALL ? 
		   WRITE_SYNC_PLUG : WRITE, bio);
	ASSERT(!bio_flagged(bio, BIO_EOPNOTSUPP));
	bio_put(bio);
}

STATIC struct bio *
xfs_alloc_ioend_bio(
	struct buffer_head	*bh)
{
	struct bio		*bio;
	int			nvecs = bio_get_nr_vecs(bh->b_bdev);

	do {
		bio = bio_alloc(GFP_NOIO, nvecs);
		nvecs >>= 1;
	} while (!bio);

	ASSERT(bio->bi_private == NULL);
	bio->bi_sector = bh->b_blocknr * (bh->b_size >> 9);
	bio->bi_bdev = bh->b_bdev;
	bio_get(bio);
	return bio;
}

STATIC void
xfs_start_buffer_writeback(
	struct buffer_head	*bh)
{
	ASSERT(buffer_mapped(bh));
	ASSERT(buffer_locked(bh));
	ASSERT(!buffer_delay(bh));
	ASSERT(!buffer_unwritten(bh));

	mark_buffer_async_write(bh);
	set_buffer_uptodate(bh);
	clear_buffer_dirty(bh);
}

STATIC void
xfs_start_page_writeback(
	struct page		*page,
	int			clear_dirty,
	int			buffers)
{
	ASSERT(PageLocked(page));
	ASSERT(!PageWriteback(page));
	if (clear_dirty)
		clear_page_dirty_for_io(page);
	set_page_writeback(page);
	unlock_page(page);
	/* If no buffers on the page are to be written, finish it here */
	if (!buffers)
		end_page_writeback(page);
}

static inline int bio_add_buffer(struct bio *bio, struct buffer_head *bh)
{
	return bio_add_page(bio, bh->b_page, bh->b_size, bh_offset(bh));
}

/*
 * Submit all of the bios for all of the ioends we have saved up, covering the
 * initial writepage page and also any probed pages.
 *
 * Because we may have multiple ioends spanning a page, we need to start
 * writeback on all the buffers before we submit them for I/O. If we mark the
 * buffers as we go, then we can end up with a page that only has some buffers
 * marked async write, and I/O completion on it can occur before we mark the
 * other buffers async write.
 *
 * The end result of this is that we trip a bug in end_page_writeback() because
 * we call it twice for the one page as the code in end_buffer_async_write()
 * assumes that all buffers on the page are started at the same time.
 *
 * The fix is two passes across the ioend list - one to start writeback on the
 * buffer_heads, and then submit them for I/O on the second pass.
 */
STATIC void
xfs_submit_ioend(
	struct writeback_control *wbc,
	xfs_ioend_t		*ioend)
{
	xfs_ioend_t		*head = ioend;
	xfs_ioend_t		*next;
	struct buffer_head	*bh;
	struct bio		*bio;
	sector_t		lastblock = 0;

	/* Pass 1 - start writeback */
	do {
		next = ioend->io_list;
		for (bh = ioend->io_buffer_head; bh; bh = bh->b_private) {
			xfs_start_buffer_writeback(bh);
		}
	} while ((ioend = next) != NULL);

	/* Pass 2 - submit I/O */
	ioend = head;
	do {
		next = ioend->io_list;
		bio = NULL;

		for (bh = ioend->io_buffer_head; bh; bh = bh->b_private) {

			if (!bio) {
 retry:
				bio = xfs_alloc_ioend_bio(bh);
			} else if (bh->b_blocknr != lastblock + 1) {
				xfs_submit_ioend_bio(wbc, ioend, bio);
				goto retry;
			}

			if (bio_add_buffer(bio, bh) != bh->b_size) {
				xfs_submit_ioend_bio(wbc, ioend, bio);
				goto retry;
			}

			lastblock = bh->b_blocknr;
		}
		if (bio)
			xfs_submit_ioend_bio(wbc, ioend, bio);
		xfs_finish_ioend(ioend);
	} while ((ioend = next) != NULL);
}

/*
 * Cancel submission of all buffer_heads so far in this endio.
 * Toss the endio too.
Only ever called for the initial page 510 * in a writepage request, so only ever one page. 511 */ 512STATIC void 513xfs_cancel_ioend( 514 xfs_ioend_t *ioend) 515{ 516 xfs_ioend_t *next; 517 struct buffer_head *bh, *next_bh; 518 519 do { 520 next = ioend->io_list; 521 bh = ioend->io_buffer_head; 522 do { 523 next_bh = bh->b_private; 524 clear_buffer_async_write(bh); 525 unlock_buffer(bh); 526 } while ((bh = next_bh) != NULL); 527 528 xfs_ioend_wake(XFS_I(ioend->io_inode)); 529 mempool_free(ioend, xfs_ioend_pool); 530 } while ((ioend = next) != NULL); 531} 532 533/* 534 * Test to see if we've been building up a completion structure for 535 * earlier buffers -- if so, we try to append to this ioend if we 536 * can, otherwise we finish off any current ioend and start another. 537 * Return true if we've finished the given ioend. 538 */ 539STATIC void 540xfs_add_to_ioend( 541 struct inode *inode, 542 struct buffer_head *bh, 543 xfs_off_t offset, 544 unsigned int type, 545 xfs_ioend_t **result, 546 int need_ioend) 547{ 548 xfs_ioend_t *ioend = *result; 549 550 if (!ioend || need_ioend || type != ioend->io_type) { 551 xfs_ioend_t *previous = *result; 552 553 ioend = xfs_alloc_ioend(inode, type); 554 ioend->io_offset = offset; 555 ioend->io_buffer_head = bh; 556 ioend->io_buffer_tail = bh; 557 if (previous) 558 previous->io_list = ioend; 559 *result = ioend; 560 } else { 561 ioend->io_buffer_tail->b_private = bh; 562 ioend->io_buffer_tail = bh; 563 } 564 565 bh->b_private = NULL; 566 ioend->io_size += bh->b_size; 567} 568 569STATIC void 570xfs_map_buffer( 571 struct inode *inode, 572 struct buffer_head *bh, 573 struct xfs_bmbt_irec *imap, 574 xfs_off_t offset) 575{ 576 sector_t bn; 577 struct xfs_mount *m = XFS_I(inode)->i_mount; 578 xfs_off_t iomap_offset = XFS_FSB_TO_B(m, imap->br_startoff); 579 xfs_daddr_t iomap_bn = xfs_fsb_to_db(XFS_I(inode), imap->br_startblock); 580 581 ASSERT(imap->br_startblock != HOLESTARTBLOCK); 582 ASSERT(imap->br_startblock != DELAYSTARTBLOCK); 583 584 bn = (iomap_bn >> (inode->i_blkbits - BBSHIFT)) + 585 ((offset - iomap_offset) >> inode->i_blkbits); 586 587 ASSERT(bn || XFS_IS_REALTIME_INODE(XFS_I(inode))); 588 589 bh->b_blocknr = bn; 590 set_buffer_mapped(bh); 591} 592 593STATIC void 594xfs_map_at_offset( 595 struct inode *inode, 596 struct buffer_head *bh, 597 struct xfs_bmbt_irec *imap, 598 xfs_off_t offset) 599{ 600 ASSERT(imap->br_startblock != HOLESTARTBLOCK); 601 ASSERT(imap->br_startblock != DELAYSTARTBLOCK); 602 603 lock_buffer(bh); 604 xfs_map_buffer(inode, bh, imap, offset); 605 bh->b_bdev = xfs_find_bdev_for_inode(inode); 606 set_buffer_mapped(bh); 607 clear_buffer_delay(bh); 608 clear_buffer_unwritten(bh); 609} 610 611/* 612 * Look for a page at index that is suitable for clustering. 
613 */ 614STATIC unsigned int 615xfs_probe_page( 616 struct page *page, 617 unsigned int pg_offset) 618{ 619 struct buffer_head *bh, *head; 620 int ret = 0; 621 622 if (PageWriteback(page)) 623 return 0; 624 if (!PageDirty(page)) 625 return 0; 626 if (!page->mapping) 627 return 0; 628 if (!page_has_buffers(page)) 629 return 0; 630 631 bh = head = page_buffers(page); 632 do { 633 if (!buffer_uptodate(bh)) 634 break; 635 if (!buffer_mapped(bh)) 636 break; 637 ret += bh->b_size; 638 if (ret >= pg_offset) 639 break; 640 } while ((bh = bh->b_this_page) != head); 641 642 return ret; 643} 644 645STATIC size_t 646xfs_probe_cluster( 647 struct inode *inode, 648 struct page *startpage, 649 struct buffer_head *bh, 650 struct buffer_head *head) 651{ 652 struct pagevec pvec; 653 pgoff_t tindex, tlast, tloff; 654 size_t total = 0; 655 int done = 0, i; 656 657 /* First sum forwards in this page */ 658 do { 659 if (!buffer_uptodate(bh) || !buffer_mapped(bh)) 660 return total; 661 total += bh->b_size; 662 } while ((bh = bh->b_this_page) != head); 663 664 /* if we reached the end of the page, sum forwards in following pages */ 665 tlast = i_size_read(inode) >> PAGE_CACHE_SHIFT; 666 tindex = startpage->index + 1; 667 668 /* Prune this back to avoid pathological behavior */ 669 tloff = min(tlast, startpage->index + 64); 670 671 pagevec_init(&pvec, 0); 672 while (!done && tindex <= tloff) { 673 unsigned len = min_t(pgoff_t, PAGEVEC_SIZE, tlast - tindex + 1); 674 675 if (!pagevec_lookup(&pvec, inode->i_mapping, tindex, len)) 676 break; 677 678 for (i = 0; i < pagevec_count(&pvec); i++) { 679 struct page *page = pvec.pages[i]; 680 size_t pg_offset, pg_len = 0; 681 682 if (tindex == tlast) { 683 pg_offset = 684 i_size_read(inode) & (PAGE_CACHE_SIZE - 1); 685 if (!pg_offset) { 686 done = 1; 687 break; 688 } 689 } else 690 pg_offset = PAGE_CACHE_SIZE; 691 692 if (page->index == tindex && trylock_page(page)) { 693 pg_len = xfs_probe_page(page, pg_offset); 694 unlock_page(page); 695 } 696 697 if (!pg_len) { 698 done = 1; 699 break; 700 } 701 702 total += pg_len; 703 tindex++; 704 } 705 706 pagevec_release(&pvec); 707 cond_resched(); 708 } 709 710 return total; 711} 712 713/* 714 * Test if a given page is suitable for writing as part of an unwritten 715 * or delayed allocate extent. 716 */ 717STATIC int 718xfs_is_delayed_page( 719 struct page *page, 720 unsigned int type) 721{ 722 if (PageWriteback(page)) 723 return 0; 724 725 if (page->mapping && page_has_buffers(page)) { 726 struct buffer_head *bh, *head; 727 int acceptable = 0; 728 729 bh = head = page_buffers(page); 730 do { 731 if (buffer_unwritten(bh)) 732 acceptable = (type == IO_UNWRITTEN); 733 else if (buffer_delay(bh)) 734 acceptable = (type == IO_DELAY); 735 else if (buffer_dirty(bh) && buffer_mapped(bh)) 736 acceptable = (type == IO_NEW); 737 else 738 break; 739 } while ((bh = bh->b_this_page) != head); 740 741 if (acceptable) 742 return 1; 743 } 744 745 return 0; 746} 747 748/* 749 * Allocate & map buffers for page given the extent map. Write it out. 750 * except for the original page of a writepage, this is called on 751 * delalloc/unwritten pages only, for the original page it is possible 752 * that the page has no mapping at all. 
753 */ 754STATIC int 755xfs_convert_page( 756 struct inode *inode, 757 struct page *page, 758 loff_t tindex, 759 struct xfs_bmbt_irec *imap, 760 xfs_ioend_t **ioendp, 761 struct writeback_control *wbc, 762 int all_bh) 763{ 764 struct buffer_head *bh, *head; 765 xfs_off_t end_offset; 766 unsigned long p_offset; 767 unsigned int type; 768 int len, page_dirty; 769 int count = 0, done = 0, uptodate = 1; 770 xfs_off_t offset = page_offset(page); 771 772 if (page->index != tindex) 773 goto fail; 774 if (!trylock_page(page)) 775 goto fail; 776 if (PageWriteback(page)) 777 goto fail_unlock_page; 778 if (page->mapping != inode->i_mapping) 779 goto fail_unlock_page; 780 if (!xfs_is_delayed_page(page, (*ioendp)->io_type)) 781 goto fail_unlock_page; 782 783 /* 784 * page_dirty is initially a count of buffers on the page before 785 * EOF and is decremented as we move each into a cleanable state. 786 * 787 * Derivation: 788 * 789 * End offset is the highest offset that this page should represent. 790 * If we are on the last page, (end_offset & (PAGE_CACHE_SIZE - 1)) 791 * will evaluate non-zero and be less than PAGE_CACHE_SIZE and 792 * hence give us the correct page_dirty count. On any other page, 793 * it will be zero and in that case we need page_dirty to be the 794 * count of buffers on the page. 795 */ 796 end_offset = min_t(unsigned long long, 797 (xfs_off_t)(page->index + 1) << PAGE_CACHE_SHIFT, 798 i_size_read(inode)); 799 800 len = 1 << inode->i_blkbits; 801 p_offset = min_t(unsigned long, end_offset & (PAGE_CACHE_SIZE - 1), 802 PAGE_CACHE_SIZE); 803 p_offset = p_offset ? roundup(p_offset, len) : PAGE_CACHE_SIZE; 804 page_dirty = p_offset / len; 805 806 bh = head = page_buffers(page); 807 do { 808 if (offset >= end_offset) 809 break; 810 if (!buffer_uptodate(bh)) 811 uptodate = 0; 812 if (!(PageUptodate(page) || buffer_uptodate(bh))) { 813 done = 1; 814 continue; 815 } 816 817 if (buffer_unwritten(bh) || buffer_delay(bh)) { 818 if (buffer_unwritten(bh)) 819 type = IO_UNWRITTEN; 820 else 821 type = IO_DELAY; 822 823 if (!xfs_imap_valid(inode, imap, offset)) { 824 done = 1; 825 continue; 826 } 827 828 ASSERT(imap->br_startblock != HOLESTARTBLOCK); 829 ASSERT(imap->br_startblock != DELAYSTARTBLOCK); 830 831 xfs_map_at_offset(inode, bh, imap, offset); 832 xfs_add_to_ioend(inode, bh, offset, type, 833 ioendp, done); 834 835 page_dirty--; 836 count++; 837 } else { 838 type = IO_NEW; 839 if (buffer_mapped(bh) && all_bh) { 840 lock_buffer(bh); 841 xfs_add_to_ioend(inode, bh, offset, 842 type, ioendp, done); 843 count++; 844 page_dirty--; 845 } else { 846 done = 1; 847 } 848 } 849 } while (offset += len, (bh = bh->b_this_page) != head); 850 851 if (uptodate && bh == head) 852 SetPageUptodate(page); 853 854 if (count) { 855 if (--wbc->nr_to_write <= 0 && 856 wbc->sync_mode == WB_SYNC_NONE) 857 done = 1; 858 } 859 xfs_start_page_writeback(page, !page_dirty, count); 860 861 return done; 862 fail_unlock_page: 863 unlock_page(page); 864 fail: 865 return 1; 866} 867 868/* 869 * Convert & write out a cluster of pages in the same extent as defined 870 * by mp and following the start page. 
871 */ 872STATIC void 873xfs_cluster_write( 874 struct inode *inode, 875 pgoff_t tindex, 876 struct xfs_bmbt_irec *imap, 877 xfs_ioend_t **ioendp, 878 struct writeback_control *wbc, 879 int all_bh, 880 pgoff_t tlast) 881{ 882 struct pagevec pvec; 883 int done = 0, i; 884 885 pagevec_init(&pvec, 0); 886 while (!done && tindex <= tlast) { 887 unsigned len = min_t(pgoff_t, PAGEVEC_SIZE, tlast - tindex + 1); 888 889 if (!pagevec_lookup(&pvec, inode->i_mapping, tindex, len)) 890 break; 891 892 for (i = 0; i < pagevec_count(&pvec); i++) { 893 done = xfs_convert_page(inode, pvec.pages[i], tindex++, 894 imap, ioendp, wbc, all_bh); 895 if (done) 896 break; 897 } 898 899 pagevec_release(&pvec); 900 cond_resched(); 901 } 902} 903 904STATIC void 905xfs_vm_invalidatepage( 906 struct page *page, 907 unsigned long offset) 908{ 909 trace_xfs_invalidatepage(page->mapping->host, page, offset); 910 block_invalidatepage(page, offset); 911} 912 913/* 914 * If the page has delalloc buffers on it, we need to punch them out before we 915 * invalidate the page. If we don't, we leave a stale delalloc mapping on the 916 * inode that can trip a BUG() in xfs_get_blocks() later on if a direct IO read 917 * is done on that same region - the delalloc extent is returned when none is 918 * supposed to be there. 919 * 920 * We prevent this by truncating away the delalloc regions on the page before 921 * invalidating it. Because they are delalloc, we can do this without needing a 922 * transaction. Indeed - if we get ENOSPC errors, we have to be able to do this 923 * truncation without a transaction as there is no space left for block 924 * reservation (typically why we see a ENOSPC in writeback). 925 * 926 * This is not a performance critical path, so for now just do the punching a 927 * buffer head at a time. 928 */ 929STATIC void 930xfs_aops_discard_page( 931 struct page *page) 932{ 933 struct inode *inode = page->mapping->host; 934 struct xfs_inode *ip = XFS_I(inode); 935 struct buffer_head *bh, *head; 936 loff_t offset = page_offset(page); 937 ssize_t len = 1 << inode->i_blkbits; 938 939 if (!xfs_is_delayed_page(page, IO_DELAY)) 940 goto out_invalidate; 941 942 if (XFS_FORCED_SHUTDOWN(ip->i_mount)) 943 goto out_invalidate; 944 945 xfs_fs_cmn_err(CE_ALERT, ip->i_mount, 946 "page discard on page %p, inode 0x%llx, offset %llu.", 947 page, ip->i_ino, offset); 948 949 xfs_ilock(ip, XFS_ILOCK_EXCL); 950 bh = head = page_buffers(page); 951 do { 952 int done; 953 xfs_fileoff_t offset_fsb; 954 xfs_bmbt_irec_t imap; 955 int nimaps = 1; 956 int error; 957 xfs_fsblock_t firstblock; 958 xfs_bmap_free_t flist; 959 960 if (!buffer_delay(bh)) 961 goto next_buffer; 962 963 offset_fsb = XFS_B_TO_FSBT(ip->i_mount, offset); 964 965 /* 966 * Map the range first and check that it is a delalloc extent 967 * before trying to unmap the range. Otherwise we will be 968 * trying to remove a real extent (which requires a 969 * transaction) or a hole, which is probably a bad idea... 
970 */ 971 error = xfs_bmapi(NULL, ip, offset_fsb, 1, 972 XFS_BMAPI_ENTIRE, NULL, 0, &imap, 973 &nimaps, NULL); 974 975 if (error) { 976 /* something screwed, just bail */ 977 if (!XFS_FORCED_SHUTDOWN(ip->i_mount)) { 978 xfs_fs_cmn_err(CE_ALERT, ip->i_mount, 979 "page discard failed delalloc mapping lookup."); 980 } 981 break; 982 } 983 if (!nimaps) { 984 /* nothing there */ 985 goto next_buffer; 986 } 987 if (imap.br_startblock != DELAYSTARTBLOCK) { 988 /* been converted, ignore */ 989 goto next_buffer; 990 } 991 WARN_ON(imap.br_blockcount == 0); 992 993 /* 994 * Note: while we initialise the firstblock/flist pair, they 995 * should never be used because blocks should never be 996 * allocated or freed for a delalloc extent and hence we need 997 * don't cancel or finish them after the xfs_bunmapi() call. 998 */ 999 xfs_bmap_init(&flist, &firstblock); 1000 error = xfs_bunmapi(NULL, ip, offset_fsb, 1, 0, 1, &firstblock, 1001 &flist, &done); 1002 1003 ASSERT(!flist.xbf_count && !flist.xbf_first); 1004 if (error) { 1005 /* something screwed, just bail */ 1006 if (!XFS_FORCED_SHUTDOWN(ip->i_mount)) { 1007 xfs_fs_cmn_err(CE_ALERT, ip->i_mount, 1008 "page discard unable to remove delalloc mapping."); 1009 } 1010 break; 1011 } 1012next_buffer: 1013 offset += len; 1014 1015 } while ((bh = bh->b_this_page) != head); 1016 1017 xfs_iunlock(ip, XFS_ILOCK_EXCL); 1018out_invalidate: 1019 xfs_vm_invalidatepage(page, 0); 1020 return; 1021} 1022 1023/* 1024 * Write out a dirty page. 1025 * 1026 * For delalloc space on the page we need to allocate space and flush it. 1027 * For unwritten space on the page we need to start the conversion to 1028 * regular allocated space. 1029 * For any other dirty buffer heads on the page we should flush them. 1030 * 1031 * If we detect that a transaction would be required to flush the page, we 1032 * have to check the process flags first, if we are already in a transaction 1033 * or disk I/O during allocations is off, we need to fail the writepage and 1034 * redirty the page. 1035 */ 1036STATIC int 1037xfs_vm_writepage( 1038 struct page *page, 1039 struct writeback_control *wbc) 1040{ 1041 struct inode *inode = page->mapping->host; 1042 int delalloc, unwritten; 1043 struct buffer_head *bh, *head; 1044 struct xfs_bmbt_irec imap; 1045 xfs_ioend_t *ioend = NULL, *iohead = NULL; 1046 loff_t offset; 1047 unsigned int type; 1048 __uint64_t end_offset; 1049 pgoff_t end_index, last_index; 1050 ssize_t size, len; 1051 int flags, err, imap_valid = 0, uptodate = 1; 1052 int count = 0; 1053 int all_bh = 0; 1054 1055 trace_xfs_writepage(inode, page, 0); 1056 1057 ASSERT(page_has_buffers(page)); 1058 1059 /* 1060 * Refuse to write the page out if we are called from reclaim context. 1061 * 1062 * This avoids stack overflows when called from deeply used stacks in 1063 * random callers for direct reclaim or memcg reclaim. We explicitly 1064 * allow reclaim from kswapd as the stack usage there is relatively low. 1065 * 1066 * This should really be done by the core VM, but until that happens 1067 * filesystems like XFS, btrfs and ext4 have to take care of this 1068 * by themselves. 1069 */ 1070 if ((current->flags & (PF_MEMALLOC|PF_KSWAPD)) == PF_MEMALLOC) 1071 goto redirty; 1072 1073 /* 1074 * We need a transaction if there are delalloc or unwritten buffers 1075 * on the page. 1076 * 1077 * If we need a transaction and the process flags say we are already 1078 * in a transaction, or no IO is allowed then mark the page dirty 1079 * again and leave the page as is. 
1080 */ 1081 xfs_count_page_state(page, &delalloc, &unwritten); 1082 if ((current->flags & PF_FSTRANS) && (delalloc || unwritten)) 1083 goto redirty; 1084 1085 /* Is this page beyond the end of the file? */ 1086 offset = i_size_read(inode); 1087 end_index = offset >> PAGE_CACHE_SHIFT; 1088 last_index = (offset - 1) >> PAGE_CACHE_SHIFT; 1089 if (page->index >= end_index) { 1090 if ((page->index >= end_index + 1) || 1091 !(i_size_read(inode) & (PAGE_CACHE_SIZE - 1))) { 1092 unlock_page(page); 1093 return 0; 1094 } 1095 } 1096 1097 end_offset = min_t(unsigned long long, 1098 (xfs_off_t)(page->index + 1) << PAGE_CACHE_SHIFT, 1099 offset); 1100 len = 1 << inode->i_blkbits; 1101 1102 bh = head = page_buffers(page); 1103 offset = page_offset(page); 1104 flags = BMAPI_READ; 1105 type = IO_NEW; 1106 1107 do { 1108 if (offset >= end_offset) 1109 break; 1110 if (!buffer_uptodate(bh)) 1111 uptodate = 0; 1112 1113 /* 1114 * A hole may still be marked uptodate because discard_buffer 1115 * leaves the flag set. 1116 */ 1117 if (!buffer_mapped(bh) && buffer_uptodate(bh)) { 1118 ASSERT(!buffer_dirty(bh)); 1119 imap_valid = 0; 1120 continue; 1121 } 1122 1123 if (imap_valid) 1124 imap_valid = xfs_imap_valid(inode, &imap, offset); 1125 1126 if (buffer_unwritten(bh) || buffer_delay(bh)) { 1127 int new_ioend = 0; 1128 1129 /* 1130 * Make sure we don't use a read-only iomap 1131 */ 1132 if (flags == BMAPI_READ) 1133 imap_valid = 0; 1134 1135 if (buffer_unwritten(bh)) { 1136 type = IO_UNWRITTEN; 1137 flags = BMAPI_WRITE | BMAPI_IGNSTATE; 1138 } else if (buffer_delay(bh)) { 1139 type = IO_DELAY; 1140 flags = BMAPI_ALLOCATE; 1141 1142 if (wbc->sync_mode == WB_SYNC_NONE && 1143 wbc->nonblocking) 1144 flags |= BMAPI_TRYLOCK; 1145 } 1146 1147 if (!imap_valid) { 1148 /* 1149 * If we didn't have a valid mapping then we 1150 * need to ensure that we put the new mapping 1151 * in a new ioend structure. This needs to be 1152 * done to ensure that the ioends correctly 1153 * reflect the block mappings at io completion 1154 * for unwritten extent conversion. 1155 */ 1156 new_ioend = 1; 1157 err = xfs_map_blocks(inode, offset, len, 1158 &imap, flags); 1159 if (err) 1160 goto error; 1161 imap_valid = xfs_imap_valid(inode, &imap, 1162 offset); 1163 } 1164 if (imap_valid) { 1165 xfs_map_at_offset(inode, bh, &imap, offset); 1166 xfs_add_to_ioend(inode, bh, offset, type, 1167 &ioend, new_ioend); 1168 count++; 1169 } 1170 } else if (buffer_uptodate(bh)) { 1171 /* 1172 * we got here because the buffer is already mapped. 1173 * That means it must already have extents allocated 1174 * underneath it. Map the extent by reading it. 1175 */ 1176 if (!imap_valid || flags != BMAPI_READ) { 1177 flags = BMAPI_READ; 1178 size = xfs_probe_cluster(inode, page, bh, head); 1179 err = xfs_map_blocks(inode, offset, size, 1180 &imap, flags); 1181 if (err) 1182 goto error; 1183 imap_valid = xfs_imap_valid(inode, &imap, 1184 offset); 1185 } 1186 1187 /* 1188 * We set the type to IO_NEW in case we are doing a 1189 * small write at EOF that is extending the file but 1190 * without needing an allocation. We need to update the 1191 * file size on I/O completion in this case so it is 1192 * the same case as having just allocated a new extent 1193 * that we are writing into for the first time. 
1194 */ 1195 type = IO_NEW; 1196 if (trylock_buffer(bh)) { 1197 if (imap_valid) 1198 all_bh = 1; 1199 xfs_add_to_ioend(inode, bh, offset, type, 1200 &ioend, !imap_valid); 1201 count++; 1202 } else { 1203 imap_valid = 0; 1204 } 1205 } else if (PageUptodate(page)) { 1206 ASSERT(buffer_mapped(bh)); 1207 imap_valid = 0; 1208 } 1209 1210 if (!iohead) 1211 iohead = ioend; 1212 1213 } while (offset += len, ((bh = bh->b_this_page) != head)); 1214 1215 if (uptodate && bh == head) 1216 SetPageUptodate(page); 1217 1218 xfs_start_page_writeback(page, 1, count); 1219 1220 if (ioend && imap_valid) { 1221 xfs_off_t end_index; 1222 1223 end_index = imap.br_startoff + imap.br_blockcount; 1224 1225 /* to bytes */ 1226 end_index <<= inode->i_blkbits; 1227 1228 /* to pages */ 1229 end_index = (end_index - 1) >> PAGE_CACHE_SHIFT; 1230 1231 /* check against file size */ 1232 if (end_index > last_index) 1233 end_index = last_index; 1234 1235 xfs_cluster_write(inode, page->index + 1, &imap, &ioend, 1236 wbc, all_bh, end_index); 1237 } 1238 1239 if (iohead) 1240 xfs_submit_ioend(wbc, iohead); 1241 1242 return 0; 1243 1244error: 1245 if (iohead) 1246 xfs_cancel_ioend(iohead); 1247 1248 if (err == -EAGAIN) 1249 goto redirty; 1250 1251 xfs_aops_discard_page(page); 1252 ClearPageUptodate(page); 1253 unlock_page(page); 1254 return err; 1255 1256redirty: 1257 redirty_page_for_writepage(wbc, page); 1258 unlock_page(page); 1259 return 0; 1260} 1261 1262STATIC int 1263xfs_vm_writepages( 1264 struct address_space *mapping, 1265 struct writeback_control *wbc) 1266{ 1267 xfs_iflags_clear(XFS_I(mapping->host), XFS_ITRUNCATED); 1268 return generic_writepages(mapping, wbc); 1269} 1270 1271/* 1272 * Called to move a page into cleanable state - and from there 1273 * to be released. The page should already be clean. We always 1274 * have buffer heads in this call. 1275 * 1276 * Returns 1 if the page is ok to release, 0 otherwise. 1277 */ 1278STATIC int 1279xfs_vm_releasepage( 1280 struct page *page, 1281 gfp_t gfp_mask) 1282{ 1283 int delalloc, unwritten; 1284 1285 trace_xfs_releasepage(page->mapping->host, page, 0); 1286 1287 xfs_count_page_state(page, &delalloc, &unwritten); 1288 1289 if (WARN_ON(delalloc)) 1290 return 0; 1291 if (WARN_ON(unwritten)) 1292 return 0; 1293 1294 return try_to_free_buffers(page); 1295} 1296 1297STATIC int 1298__xfs_get_blocks( 1299 struct inode *inode, 1300 sector_t iblock, 1301 struct buffer_head *bh_result, 1302 int create, 1303 int direct) 1304{ 1305 int flags = create ? BMAPI_WRITE : BMAPI_READ; 1306 struct xfs_bmbt_irec imap; 1307 xfs_off_t offset; 1308 ssize_t size; 1309 int nimap = 1; 1310 int new = 0; 1311 int error; 1312 1313 offset = (xfs_off_t)iblock << inode->i_blkbits; 1314 ASSERT(bh_result->b_size >= (1 << inode->i_blkbits)); 1315 size = bh_result->b_size; 1316 1317 if (!create && direct && offset >= i_size_read(inode)) 1318 return 0; 1319 1320 if (direct && create) 1321 flags |= BMAPI_DIRECT; 1322 1323 error = xfs_iomap(XFS_I(inode), offset, size, flags, &imap, &nimap, 1324 &new); 1325 if (error) 1326 return -error; 1327 if (nimap == 0) 1328 return 0; 1329 1330 if (imap.br_startblock != HOLESTARTBLOCK && 1331 imap.br_startblock != DELAYSTARTBLOCK) { 1332 /* 1333 * For unwritten extents do not report a disk address on 1334 * the read case (treat as if we're reading into a hole). 
		 */
		if (create || !ISUNWRITTEN(&imap))
			xfs_map_buffer(inode, bh_result, &imap, offset);
		if (create && ISUNWRITTEN(&imap)) {
			if (direct)
				bh_result->b_private = inode;
			set_buffer_unwritten(bh_result);
		}
	}

	/*
	 * If this is a realtime file, data may be on a different device
	 * to that pointed to from the buffer_head b_bdev currently.
	 */
	bh_result->b_bdev = xfs_find_bdev_for_inode(inode);

	/*
	 * If we previously allocated a block out beyond eof and we are now
	 * coming back to use it then we will need to flag it as new even if it
	 * has a disk address.
	 *
	 * With sub-block writes into unwritten extents we also need to mark
	 * the buffer as new so that the unwritten parts of the buffer get
	 * correctly zeroed.
	 */
	if (create &&
	    ((!buffer_mapped(bh_result) && !buffer_uptodate(bh_result)) ||
	     (offset >= i_size_read(inode)) ||
	     (new || ISUNWRITTEN(&imap))))
		set_buffer_new(bh_result);

	if (imap.br_startblock == DELAYSTARTBLOCK) {
		BUG_ON(direct);
		if (create) {
			set_buffer_uptodate(bh_result);
			set_buffer_mapped(bh_result);
			set_buffer_delay(bh_result);
		}
	}

	/*
	 * If this is O_DIRECT or the mpage code calling, tell them how large
	 * the mapping is so that we can avoid repeated get_blocks calls.
	 */
	if (direct || size > (1 << inode->i_blkbits)) {
		xfs_off_t		mapping_size;

		mapping_size = imap.br_startoff + imap.br_blockcount - iblock;
		mapping_size <<= inode->i_blkbits;

		ASSERT(mapping_size > 0);
		if (mapping_size > size)
			mapping_size = size;
		if (mapping_size > LONG_MAX)
			mapping_size = LONG_MAX;

		bh_result->b_size = mapping_size;
	}

	return 0;
}

int
xfs_get_blocks(
	struct inode		*inode,
	sector_t		iblock,
	struct buffer_head	*bh_result,
	int			create)
{
	return __xfs_get_blocks(inode, iblock, bh_result, create, 0);
}

STATIC int
xfs_get_blocks_direct(
	struct inode		*inode,
	sector_t		iblock,
	struct buffer_head	*bh_result,
	int			create)
{
	return __xfs_get_blocks(inode, iblock, bh_result, create, 1);
}

/*
 * Complete a direct I/O write request.
 *
 * If the private argument is non-NULL __xfs_get_blocks signals us that we
 * need to issue a transaction to convert the range from unwritten to written
 * extents. In case this is regular synchronous I/O we just call xfs_end_io
 * to do this and we are done. But in case this was a successful AIO
 * request this handler is called from interrupt context, from which we
 * can't start transactions. In that case offload the I/O completion to
 * the workqueues we also use for buffered I/O completion.
 */
STATIC void
xfs_end_io_direct_write(
	struct kiocb		*iocb,
	loff_t			offset,
	ssize_t			size,
	void			*private,
	int			ret,
	bool			is_async)
{
	struct xfs_ioend	*ioend = iocb->private;

	/*
	 * blockdev_direct_IO can return an error even after the I/O
	 * completion handler was called. Thus we need to protect
	 * against double-freeing.
1443 */ 1444 iocb->private = NULL; 1445 1446 ioend->io_offset = offset; 1447 ioend->io_size = size; 1448 if (private && size > 0) 1449 ioend->io_type = IO_UNWRITTEN; 1450 1451 if (is_async) { 1452 /* 1453 * If we are converting an unwritten extent we need to delay 1454 * the AIO completion until after the unwrittent extent 1455 * conversion has completed, otherwise do it ASAP. 1456 */ 1457 if (ioend->io_type == IO_UNWRITTEN) { 1458 ioend->io_iocb = iocb; 1459 ioend->io_result = ret; 1460 } else { 1461 aio_complete(iocb, ret, 0); 1462 } 1463 xfs_finish_ioend(ioend); 1464 } else { 1465 xfs_finish_ioend_sync(ioend); 1466 } 1467} 1468 1469STATIC ssize_t 1470xfs_vm_direct_IO( 1471 int rw, 1472 struct kiocb *iocb, 1473 const struct iovec *iov, 1474 loff_t offset, 1475 unsigned long nr_segs) 1476{ 1477 struct inode *inode = iocb->ki_filp->f_mapping->host; 1478 struct block_device *bdev = xfs_find_bdev_for_inode(inode); 1479 ssize_t ret; 1480 1481 if (rw & WRITE) { 1482 iocb->private = xfs_alloc_ioend(inode, IO_NEW); 1483 1484 ret = __blockdev_direct_IO(rw, iocb, inode, bdev, iov, 1485 offset, nr_segs, 1486 xfs_get_blocks_direct, 1487 xfs_end_io_direct_write, NULL, 0); 1488 if (ret != -EIOCBQUEUED && iocb->private) 1489 xfs_destroy_ioend(iocb->private); 1490 } else { 1491 ret = __blockdev_direct_IO(rw, iocb, inode, bdev, iov, 1492 offset, nr_segs, 1493 xfs_get_blocks_direct, 1494 NULL, NULL, 0); 1495 } 1496 1497 return ret; 1498} 1499 1500STATIC void 1501xfs_vm_write_failed( 1502 struct address_space *mapping, 1503 loff_t to) 1504{ 1505 struct inode *inode = mapping->host; 1506 1507 if (to > inode->i_size) { 1508 struct iattr ia = { 1509 .ia_valid = ATTR_SIZE | ATTR_FORCE, 1510 .ia_size = inode->i_size, 1511 }; 1512 xfs_setattr(XFS_I(inode), &ia, XFS_ATTR_NOLOCK); 1513 } 1514} 1515 1516STATIC int 1517xfs_vm_write_begin( 1518 struct file *file, 1519 struct address_space *mapping, 1520 loff_t pos, 1521 unsigned len, 1522 unsigned flags, 1523 struct page **pagep, 1524 void **fsdata) 1525{ 1526 int ret; 1527 1528 ret = block_write_begin(mapping, pos, len, flags | AOP_FLAG_NOFS, 1529 pagep, xfs_get_blocks); 1530 if (unlikely(ret)) 1531 xfs_vm_write_failed(mapping, pos + len); 1532 return ret; 1533} 1534 1535STATIC int 1536xfs_vm_write_end( 1537 struct file *file, 1538 struct address_space *mapping, 1539 loff_t pos, 1540 unsigned len, 1541 unsigned copied, 1542 struct page *page, 1543 void *fsdata) 1544{ 1545 int ret; 1546 1547 ret = generic_write_end(file, mapping, pos, len, copied, page, fsdata); 1548 if (unlikely(ret < len)) 1549 xfs_vm_write_failed(mapping, pos + len); 1550 return ret; 1551} 1552 1553STATIC sector_t 1554xfs_vm_bmap( 1555 struct address_space *mapping, 1556 sector_t block) 1557{ 1558 struct inode *inode = (struct inode *)mapping->host; 1559 struct xfs_inode *ip = XFS_I(inode); 1560 1561 trace_xfs_vm_bmap(XFS_I(inode)); 1562 xfs_ilock(ip, XFS_IOLOCK_SHARED); 1563 xfs_flush_pages(ip, (xfs_off_t)0, -1, 0, FI_REMAPF); 1564 xfs_iunlock(ip, XFS_IOLOCK_SHARED); 1565 return generic_block_bmap(mapping, block, xfs_get_blocks); 1566} 1567 1568STATIC int 1569xfs_vm_readpage( 1570 struct file *unused, 1571 struct page *page) 1572{ 1573 return mpage_readpage(page, xfs_get_blocks); 1574} 1575 1576STATIC int 1577xfs_vm_readpages( 1578 struct file *unused, 1579 struct address_space *mapping, 1580 struct list_head *pages, 1581 unsigned nr_pages) 1582{ 1583 return mpage_readpages(mapping, pages, nr_pages, xfs_get_blocks); 1584} 1585 1586const struct address_space_operations xfs_address_space_operations 
= { 1587 .readpage = xfs_vm_readpage, 1588 .readpages = xfs_vm_readpages, 1589 .writepage = xfs_vm_writepage, 1590 .writepages = xfs_vm_writepages, 1591 .sync_page = block_sync_page, 1592 .releasepage = xfs_vm_releasepage, 1593 .invalidatepage = xfs_vm_invalidatepage, 1594 .write_begin = xfs_vm_write_begin, 1595 .write_end = xfs_vm_write_end, 1596 .bmap = xfs_vm_bmap, 1597 .direct_IO = xfs_vm_direct_IO, 1598 .migratepage = buffer_migrate_page, 1599 .is_partially_uptodate = block_is_partially_uptodate, 1600 .error_remove_page = generic_error_remove_page, 1601}; 1602
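
/*
 * Illustration only, not part of the original file: a minimal sketch of how
 * this operations table is typically attached to an inode's page cache
 * mapping. In this kernel the hookup is done during inode setup (see
 * xfs_setup_inode() in xfs_iops.c); the helper below is a hypothetical
 * example and is compiled out so it does not affect the build.
 */
#if 0
static void example_attach_aops(
	struct inode		*inode)
{
	/*
	 * Route all page cache and direct I/O for this inode through the
	 * address_space operations defined above.
	 */
	inode->i_mapping->a_ops = &xfs_address_space_operations;
}
#endif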