Lines matching refs: page (fs/f2fs/data.c)

51 bool f2fs_is_cp_guaranteed(struct page *page)
53 struct address_space *mapping = page->mapping;
69 page_private_gcing(page))
74 static enum count_type __read_io_type(struct page *page)
76 struct address_space *mapping = page_file_mapping(page);
133 * things for each compressed page here: call f2fs_end_read_compressed_page()
136 * release the bio's reference to the decompress_io_ctx of the page's cluster.
145 struct page *page = bv->bv_page;
147 if (f2fs_is_compressed_page(page)) {
149 f2fs_end_read_compressed_page(page, true, 0,
151 f2fs_put_page_dic(page, in_task);
156 ClearPageUptodate(page);
158 SetPageUptodate(page);
159 dec_page_count(F2FS_P_SB(page), __read_io_type(page));
160 unlock_page(page);
194 struct page *page = bv->bv_page;
196 if (!f2fs_is_compressed_page(page) &&
197 !fsverity_verify_page(page)) {
232 * remaining page was read by @ctx->bio.
236 * that the bio includes at least one compressed page. The actual decompression
248 struct page *page = bv->bv_page;
250 if (f2fs_is_compressed_page(page))
251 f2fs_end_read_compressed_page(page, false, blkaddr,
338 struct page *page = bvec->bv_page;
339 enum count_type type = WB_DATA_TYPE(page, false);
341 fscrypt_finalize_bounce_page(&page);
344 if (f2fs_is_compressed_page(page)) {
345 f2fs_compress_write_end_io(bio, page);
351 mapping_set_error(page->mapping, -EIO);
357 f2fs_bug_on(sbi, page->mapping == NODE_MAPPING(sbi) &&
358 page->index != nid_of_node(page));
361 if (f2fs_in_warm_node_list(sbi, page))
362 f2fs_del_fsync_node_entry(sbi, page);
363 clear_page_private_gcing(page);
364 end_page_writeback(page);
545 struct page *page, nid_t ino)
553 if (!inode && !page && !ino)
557 struct page *target = bvec->bv_page;
572 if (page && page == target)
638 struct inode *inode, struct page *page,
650 ret = __has_merged_page(io->bio, inode, page, ino);
668 struct inode *inode, struct page *page,
671 __submit_merged_write_cond(sbi, inode, page, ino, type, false);
682 * Fill the locked page with data located in the block address.
683 * A caller needs to unlock the page on failure.
688 struct page *page = fio->encrypted_page ?
689 fio->encrypted_page : fio->page;
696 trace_f2fs_submit_page_bio(page, fio);
701 f2fs_set_bio_crypt_ctx(bio, fio->page->mapping->host,
702 fio->page->index, fio, GFP_NOIO);
704 if (bio_add_page(bio, page, PAGE_SIZE, 0) < PAGE_SIZE) {
710 wbc_account_cgroup_owner(fio->io_wbc, fio->page, PAGE_SIZE);
713 __read_io_type(page) : WB_DATA_TYPE(fio->page, false));
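
The matches at lines 682-713 above fall in f2fs_submit_page_bio(). Below is a minimal, hypothetical caller sketch of the contract quoted at lines 682-683: the helper fills an already-locked page from the given block address, and on failure the caller unlocks the page itself. The function name read_one_data_block and the field values are illustrative only, not taken from data.c:

	static int read_one_data_block(struct f2fs_sb_info *sbi,
				       struct page *page, block_t blkaddr)
	{
		struct f2fs_io_info fio = {
			.sbi = sbi,
			.type = DATA,
			.op = REQ_OP_READ,
			.op_flags = 0,
			.old_blkaddr = blkaddr,
			.new_blkaddr = blkaddr,
			.page = page,		/* already locked by the caller */
			.encrypted_page = NULL,
		};
		int err;

		err = f2fs_submit_page_bio(&fio);
		if (err)
			unlock_page(page);	/* contract: caller unlocks on failure */
		return err;
	}
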
753 struct page *page, enum temp_type temp)
762 if (bio_add_page(bio, page, PAGE_SIZE, 0) != PAGE_SIZE)
777 struct page *page)
800 fio->page->mapping->host,
801 fio->page->index, fio) &&
802 bio_add_page(*bio, page, PAGE_SIZE, 0) ==
808 /* page can't be merged into bio; submit the bio */
825 struct bio **bio, struct page *page)
831 f2fs_bug_on(sbi, !target && !page);
847 page, 0);
864 page, 0);
885 struct page *page = fio->encrypted_page ?
886 fio->encrypted_page : fio->page;
892 trace_f2fs_submit_page_bio(page, fio);
900 f2fs_set_bio_crypt_ctx(bio, fio->page->mapping->host,
901 fio->page->index, fio, GFP_NOIO);
903 add_bio_entry(fio->sbi, bio, page, fio->temp);
905 if (add_ipu_page(fio, &bio, page))
910 wbc_account_cgroup_owner(fio->io_wbc, fio->page, PAGE_SIZE);
912 inc_page_count(fio->sbi, WB_DATA_TYPE(page, false));
945 struct page *bio_page;
980 bio_page = fio->page;
991 !f2fs_crypt_mergeable_bio(io->bio, fio->page->mapping->host,
997 f2fs_set_bio_crypt_ctx(io->bio, fio->page->mapping->host,
1008 wbc_account_cgroup_owner(fio->io_wbc, fio->page, PAGE_SIZE);
1012 trace_f2fs_submit_page_write(fio->page, fio);
1083 static int f2fs_submit_page_read(struct inode *inode, struct page *page,
1091 page->index, for_write);
1095 /* wait for GCed page writeback via META_MAPPING */
1098 if (bio_add_page(bio, page, PAGE_SIZE, 0) < PAGE_SIZE) {
1123 * update block addresses in the node page
1200 struct page *f2fs_get_read_data_page(struct inode *inode, pgoff_t index,
1206 struct page *page;
1209 page = f2fs_grab_cache_page(mapping, index, for_write);
1210 if (!page)
1246 if (PageUptodate(page)) {
1247 unlock_page(page);
1248 return page;
1252 * A new dentry page is allocated but not able to be written, since its
1253 * new inode page couldn't be allocated due to -ENOSPC.
1259 zero_user_segment(page, 0, PAGE_SIZE);
1260 if (!PageUptodate(page))
1261 SetPageUptodate(page);
1262 unlock_page(page);
1263 return page;
1266 err = f2fs_submit_page_read(inode, page, dn.data_blkaddr,
1270 return page;
1273 f2fs_put_page(page, 1);
1277 struct page *f2fs_find_data_page(struct inode *inode, pgoff_t index,
1281 struct page *page;
1283 page = find_get_page(mapping, index);
1284 if (page && PageUptodate(page))
1285 return page;
1286 f2fs_put_page(page, 0);
1288 page = f2fs_get_read_data_page(inode, index, 0, false, next_pgofs);
1289 if (IS_ERR(page))
1290 return page;
1292 if (PageUptodate(page))
1293 return page;
1295 wait_on_page_locked(page);
1296 if (unlikely(!PageUptodate(page))) {
1297 f2fs_put_page(page, 0);
1300 return page;
1306 * whether this page exists or not.
1308 struct page *f2fs_get_lock_data_page(struct inode *inode, pgoff_t index,
1312 struct page *page;
1314 page = f2fs_get_read_data_page(inode, index, 0, for_write, NULL);
1315 if (IS_ERR(page))
1316 return page;
1319 lock_page(page);
1320 if (unlikely(page->mapping != mapping || !PageUptodate(page))) {
1321 f2fs_put_page(page, 1);
1324 return page;
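
Lines 1308-1324 above are f2fs_get_lock_data_page(), which returns a locked, uptodate data page. A minimal sketch of the usual caller pattern follows (the same get/dirty/put sequence appears around lines 3858-3869 further down); the helper name touch_data_page is hypothetical:

	static int touch_data_page(struct inode *inode, pgoff_t index)
	{
		struct page *page;

		page = f2fs_get_lock_data_page(inode, index, true);
		if (IS_ERR(page))
			return PTR_ERR(page);

		set_page_dirty(page);
		f2fs_put_page(page, 1);	/* unlock and drop the reference */
		return 0;
	}
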
1328 * Caller ensures that this data page is never allocated.
1329 * A new zero-filled data page is allocated in the page cache.
1336 struct page *f2fs_get_new_data_page(struct inode *inode,
1337 struct page *ipage, pgoff_t index, bool new_i_size)
1340 struct page *page;
1344 page = f2fs_grab_cache_page(mapping, index, true);
1345 if (!page) {
1357 f2fs_put_page(page, 1);
1363 if (PageUptodate(page))
1367 zero_user_segment(page, 0, PAGE_SIZE);
1368 if (!PageUptodate(page))
1369 SetPageUptodate(page);
1371 f2fs_put_page(page, 1);
1375 page = f2fs_get_lock_data_page(inode, index, true);
1376 if (IS_ERR(page))
1377 return page;
1383 return page;
1543 /* it only supports block size == page size */
1551 /* When reading holes, we need its node page */
1672 /* preallocate blocks in batch for one dnode page */
1801 struct page *page;
1811 page = f2fs_grab_cache_page(NODE_MAPPING(sbi),
1813 if (!page)
1818 f2fs_put_page(page, 1);
1830 f2fs_put_page(page, 1);
1844 page = f2fs_grab_cache_page(NODE_MAPPING(sbi), xnid, false);
1845 if (!page)
1850 f2fs_put_page(page, 1);
1857 f2fs_put_page(page, 1);
2045 static int f2fs_read_single_page(struct inode *inode, struct page *page,
2060 block_in_file = (sector_t)page_index(page);
2067 /* just zeroing out page which is beyond EOF */
2080 * done with this page.
2091 SetPageMappedToDisk(page);
2100 zero_user_segment(page, 0, PAGE_SIZE);
2101 if (f2fs_need_verity(inode, page->index) &&
2102 !fsverity_verify_page(page)) {
2106 if (!PageUptodate(page))
2107 SetPageUptodate(page);
2108 unlock_page(page);
2113 * This page will go to BIO. Do we need to send this
2118 !f2fs_crypt_mergeable_bio(bio, inode, page->index, NULL))) {
2125 is_readahead ? REQ_RAHEAD : 0, page->index,
2135 * If the page is under writeback, we need to wait for
2140 if (bio_add_page(bio, page, blocksize, 0) < blocksize)
2177 struct page *page = cc->rpages[i];
2179 if (!page)
2181 if ((sector_t)page->index >= last_block_in_file) {
2182 zero_user_segment(page, 0, PAGE_SIZE);
2183 if (!PageUptodate(page))
2184 SetPageUptodate(page);
2185 } else if (!PageUptodate(page)) {
2188 unlock_page(page);
2190 put_page(page);
2250 struct page *page = dic->cpages[i];
2260 if (f2fs_load_compressed_page(sbi, page, blkaddr)) {
2270 !f2fs_crypt_mergeable_bio(bio, inode, page->index, NULL))) {
2279 page->index, for_write);
2289 if (bio_add_page(bio, page, blocksize, 0) < blocksize)
2327 struct readahead_control *rac, struct page *page)
2360 page = readahead_page(rac);
2361 prefetchw(&page->flags);
2367 if (!f2fs_cluster_can_merge_page(&cc, page->index)) {
2378 page->index >> cc.log_cluster_size) {
2382 ret = f2fs_is_compressed_cluster(inode, page->index);
2387 page->index >> cc.log_cluster_size;
2397 f2fs_compress_ctx_add_page(&cc, page);
2404 ret = f2fs_read_single_page(inode, page, max_nr_pages, &map,
2410 zero_user_segment(page, 0, PAGE_SIZE);
2411 unlock_page(page);
2417 put_page(page);
2421 /* last page */
2439 struct page *page = &folio->page;
2440 struct inode *inode = page_file_mapping(page)->host;
2443 trace_f2fs_readpage(page, DATA);
2446 unlock_page(page);
2452 ret = f2fs_read_inline_data(inode, page);
2454 ret = f2fs_mpage_readpages(inode, NULL, page);
2476 struct inode *inode = fio->page->mapping->host;
2477 struct page *mpage, *page;
2483 page = fio->compressed_page ? fio->compressed_page : fio->page;
2489 fio->encrypted_page = fscrypt_encrypt_pagecache_blocks(page,
2595 if (page_private_gcing(fio->page))
2606 struct inode *inode = fio->page->mapping->host;
2616 struct page *page = fio->page;
2617 struct inode *inode = page->mapping->host;
2630 f2fs_lookup_read_extent_cache_block(inode, page->index,
2641 /* Deadlock between page->lock and f2fs_lock_op */
2645 err = f2fs_get_dnode_of_data(&dn, page->index, LOOKUP_NODE);
2651 /* This page is already truncated */
2653 ClearPageUptodate(page);
2654 clear_page_private_gcing(page);
2665 /* wait for GCed page writeback via META_MAPPING */
2680 set_page_writeback(page);
2688 if (PageWriteback(page))
2689 end_page_writeback(page);
2693 trace_f2fs_do_write_data_page(fio->page, IPU);
2715 set_page_writeback(page);
2722 trace_f2fs_do_write_data_page(page, OPU);
2732 int f2fs_write_single_data_page(struct page *page, int *submitted,
2740 struct inode *inode = page->mapping->host;
2745 loff_t psize = (loff_t)(page->index + 1) << PAGE_SHIFT;
2757 .page = page,
2769 trace_f2fs_writepage(page, DATA);
2773 mapping_set_error(page->mapping, -EIO);
2791 if (page->index < end_index ||
2798 * this page does not have to be written to disk.
2801 if ((page->index >= end_index + 1) || !offset)
2804 zero_user_segment(page, offset, PAGE_SIZE);
2834 err = f2fs_write_inline_data(inode, page);
2864 ClearPageUptodate(page);
2865 clear_page_private_gcing(page);
2869 f2fs_submit_merged_write_cond(sbi, NULL, page, 0, DATA);
2874 unlock_page(page);
2892 redirty_page_for_writepage(wbc, page);
2901 unlock_page(page);
2905 static int f2fs_write_data_page(struct page *page,
2909 struct inode *inode = page->mapping->host;
2915 if (f2fs_is_compressed_cluster(inode, page->index)) {
2916 redirty_page_for_writepage(wbc, page);
2923 return f2fs_write_single_data_page(page, NULL, NULL, NULL,
2928 * This function was copied from write_cache_pages() in mm/page-writeback.c.
2929 * The major change is that it writes cold data pages separately from
2930 * warm/hot data pages.
2938 struct page *pages_local[F2FS_ONSTACK_PAGES];
2939 struct page **pages = pages_local;
2976 pages = f2fs_kzalloc(sbi, sizeof(struct page *) <<
3039 struct page *page = pages[i];
3040 struct folio *folio = page_folio(page);
3047 struct page *pagep;
3120 f2fs_wait_on_page_writeback(&folio->page, DATA, true, true);
3129 f2fs_compress_ctx_add_page(&cc, &folio->page);
3133 ret = f2fs_write_single_data_page(&folio->page,
3252 /* skip writing if there is no dirty page in this inode */
3341 struct page *page, loff_t pos, unsigned len,
3344 struct inode *inode = page->mapping->host;
3345 pgoff_t index = page->index;
3347 struct page *ipage;
3353 * If a whole page is being written and we already preallocated all the
3382 f2fs_do_read_inline_data(page, ipage);
3388 err = f2fs_convert_inline_page(&dn, page);
3427 struct page *ipage;
3455 struct page *ipage;
3481 struct page *page, loff_t pos, unsigned int len,
3484 struct inode *inode = page->mapping->host;
3486 pgoff_t index = page->index;
3524 loff_t pos, unsigned len, struct page **pagep, void **fsdata)
3528 struct page *page = NULL;
3543 * We should check this at this moment to avoid deadlock on inode page
3544 * and #0 page. The locking rule for inline_data conversion should be:
3545 * lock_page(page #0) -> lock_page(inode_page)
3578 page = f2fs_pagecache_get_page(mapping, index,
3580 if (!page) {
3587 *pagep = page;
3590 err = prepare_atomic_write_begin(sbi, page, pos, len,
3593 err = prepare_write_begin(sbi, page, pos, len,
3600 unlock_page(page);
3602 lock_page(page);
3603 if (page->mapping != mapping) {
3604 /* The page got truncated from under us */
3605 f2fs_put_page(page, 1);
3610 f2fs_wait_on_page_writeback(page, DATA, false, true);
3612 if (len == PAGE_SIZE || PageUptodate(page))
3617 zero_user_segment(page, len, PAGE_SIZE);
3622 zero_user_segment(page, 0, PAGE_SIZE);
3623 SetPageUptodate(page);
3631 F2FS_I(inode)->cow_inode : inode, page,
3636 lock_page(page);
3637 if (unlikely(page->mapping != mapping)) {
3638 f2fs_put_page(page, 1);
3641 if (unlikely(!PageUptodate(page))) {
3649 f2fs_put_page(page, 1);
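
The comment quoted at lines 3543-3545 gives the inline_data lock-ordering rule used in f2fs_write_begin(): data page #0 is locked before the inode (node) page. A hypothetical sketch of that ordering, assuming the usual f2fs helpers (the wrapper name lock_zero_then_inode_page is illustrative):

	static int lock_zero_then_inode_page(struct inode *inode)
	{
		struct page *page, *ipage;

		/* 1. lock data page #0 first */
		page = f2fs_grab_cache_page(inode->i_mapping, 0, true);
		if (!page)
			return -ENOMEM;

		/* 2. only then take the locked inode page */
		ipage = f2fs_get_node_page(F2FS_I_SB(inode), inode->i_ino);
		if (IS_ERR(ipage)) {
			f2fs_put_page(page, 1);
			return PTR_ERR(ipage);
		}

		/* inline_data conversion work would go here */

		f2fs_put_page(ipage, 1);
		f2fs_put_page(page, 1);
		return 0;
	}
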
3657 struct page *page, void *fsdata)
3659 struct inode *inode = page->mapping->host;
3668 if (!PageUptodate(page)) {
3672 SetPageUptodate(page);
3678 f2fs_compress_write_end(inode, fsdata, page->index, copied);
3691 set_page_dirty(page);
3701 f2fs_put_page(page, 1);
3725 clear_page_private_all(&folio->page);
3734 clear_page_private_all(&folio->page);
3743 trace_f2fs_set_page_dirty(&folio->page, DATA);
3858 struct page *page;
3861 page = f2fs_get_lock_data_page(inode, blkidx, true);
3862 if (IS_ERR(page)) {
3864 ret = PTR_ERR(page);
3868 set_page_dirty(page);
3869 f2fs_put_page(page, 1);
3977 if (cur_lblock) { /* exclude the header page */
4083 void f2fs_clear_page_cache_dirty_tag(struct page *page)
4085 struct folio *folio = page_folio(page);