Lines matching refs:page in fs/f2fs/node.c

107 * free memory is lower than the watermark or the cached page count
108 * exceeds the threshold, deny caching the compressed page.
123 static void clear_node_page_dirty(struct page *page)
125 if (PageDirty(page)) {
126 f2fs_clear_page_cache_dirty_tag(page);
127 clear_page_dirty_for_io(page);
128 dec_page_count(F2FS_P_SB(page), F2FS_DIRTY_NODES);
130 ClearPageUptodate(page);
133 static struct page *get_current_nat_page(struct f2fs_sb_info *sbi, nid_t nid)
138 static struct page *get_next_nat_page(struct f2fs_sb_info *sbi, nid_t nid)
140 struct page *src_page;
141 struct page *dst_page;
149 /* get current nat block page with lock */
313 bool f2fs_in_warm_node_list(struct f2fs_sb_info *sbi, struct page *page)
315 return NODE_MAPPING(sbi) == page->mapping &&
316 IS_DNODE(page) && is_cold_node(page);
328 struct page *page)
337 get_page(page);
338 fn->page = page;
351 void f2fs_del_fsync_node_entry(struct f2fs_sb_info *sbi, struct page *page)
358 if (fn->page == page) {
363 put_page(page);
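
The pair above pins the page for as long as it sits on the fsync-node list: f2fs_add_fsync_node_entry() takes a reference with get_page() before linking the entry (lines 337-338), and f2fs_del_fsync_node_entry() drops it with put_page() after unlinking (line 363). A minimal sketch of the add side, assuming a list and a monotonic sequence counter protected by sbi->fsync_node_lock; the names follow f2fs, but treat the body as illustrative rather than a verbatim reconstruction:

	struct fsync_node_entry *fn;
	unsigned long flags;
	unsigned int seq_id;

	fn = f2fs_kmem_cache_alloc(fsync_node_entry_slab, GFP_NOFS, true, NULL);
	get_page(page);			/* pin: the list now owns a reference */
	fn->page = page;
	INIT_LIST_HEAD(&fn->list);

	spin_lock_irqsave(&sbi->fsync_node_lock, flags);
	list_add_tail(&fn->list, &sbi->fsync_node_list);
	seq_id = fn->seq_id = next_seq_id++;	/* id handed back to the fsync waiter (counter name is illustrative) */
	spin_unlock_irqrestore(&sbi->fsync_node_lock, flags);

	return seq_id;

The reference taken here is what lets a later fsync safely wait on the page's writeback even after the node page itself has been unlocked (see lines 2103-2109 further down).
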
554 struct page *page = NULL;
599 /* Fill node_info from nat page */
603 page = f2fs_get_meta_page(sbi, index);
604 if (IS_ERR(page))
605 return PTR_ERR(page);
607 nat_blk = (struct f2fs_nat_block *)page_address(page);
610 f2fs_put_page(page, 1);
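
Lines 599-610 are the canonical meta-page access pattern in this file: f2fs_get_meta_page() returns either a locked, up-to-date page or an ERR_PTR() (never NULL), so the caller checks IS_ERR(), copies what it needs out of page_address() while the page is still locked, and releases with f2fs_put_page(page, 1), the 1 meaning "unlock before dropping the reference". Condensed, with the entry indexing sketched in from the elided context (start_nid and ne are assumed locals of f2fs_get_node_info()):

	struct page *page = f2fs_get_meta_page(sbi, index);
	struct f2fs_nat_block *nat_blk;

	if (IS_ERR(page))
		return PTR_ERR(page);

	nat_blk = (struct f2fs_nat_block *)page_address(page);
	ne = nat_blk->entries[nid - start_nid];	/* copy out while locked */
	f2fs_put_page(page, 1);			/* unlock + put */
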
625 static void f2fs_ra_node_pages(struct page *parent, int start, int n)
764 struct page *npage[4];
765 struct page *parent = NULL;
937 struct page *page;
944 page = f2fs_get_node_page(sbi, dn->nid);
945 if (PTR_ERR(page) == -ENOENT)
947 else if (IS_ERR(page))
948 return PTR_ERR(page);
950 if (IS_INODE(page) || ino_of_node(page) != dn->inode->i_ino) {
952 dn->inode->i_ino, dn->nid, ino_of_node(page));
955 f2fs_put_page(page, 1);
960 dn->node_page = page;
965 f2fs_put_page(page, 1);
976 struct page *page;
988 page = f2fs_get_node_page(F2FS_I_SB(dn->inode), dn->nid);
989 if (IS_ERR(page)) {
990 trace_f2fs_truncate_nodes_exit(dn->inode, PTR_ERR(page));
991 return PTR_ERR(page);
994 f2fs_ra_node_pages(page, ofs, NIDS_PER_BLOCK);
996 rn = F2FS_NODE(page);
1006 if (set_nid(page, i, 0, false))
1020 if (set_nid(page, i, 0, false))
1032 dn->node_page = page;
1038 f2fs_put_page(page, 1);
1044 f2fs_put_page(page, 1);
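
Before the truncate walk touches any child, line 994 issues readahead for the whole nid array with f2fs_ra_node_pages(page, ofs, NIDS_PER_BLOCK), so the per-child f2fs_get_node_page() calls that follow mostly hit the page cache. A sketch of the loop body the listing elides, assuming the usual rn->in.nid[] child array of an indirect node (illustrative, not a verbatim reconstruction):

	f2fs_ra_node_pages(page, ofs, NIDS_PER_BLOCK);

	rn = F2FS_NODE(page);
	for (i = ofs; i < NIDS_PER_BLOCK; i++) {
		nid_t child_nid = le32_to_cpu(rn->in.nid[i]);

		if (child_nid == 0)
			continue;
		/* recurse into the child subtree, then clear its slot */
		if (set_nid(page, i, 0, false))
			dn->node_changed = true;
	}
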
1052 struct page *pages[2];
1122 struct page *page;
1132 page = f2fs_get_node_page(sbi, inode->i_ino);
1133 if (IS_ERR(page)) {
1134 trace_f2fs_truncate_inode_blocks_exit(inode, PTR_ERR(page));
1135 return PTR_ERR(page);
1138 set_new_dnode(&dn, inode, page, NULL, 0);
1139 unlock_page(page);
1141 ri = F2FS_INODE(page);
1194 lock_page(page);
1195 BUG_ON(page->mapping != NODE_MAPPING(sbi));
1196 f2fs_wait_on_page_writeback(page, NODE, true, true);
1198 set_page_dirty(page);
1199 unlock_page(page);
1206 f2fs_put_page(page, 0);
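
Lines 1194-1199 show the standard sequence for modifying a node page that is already cached: re-take the page lock, assert it still belongs to the node mapping, wait out in-flight writeback, make the change, then re-dirty and unlock. Condensed from the lines above (the update in between, clearing the freed i_nid[] slot, is elided in the listing):

	lock_page(page);
	BUG_ON(page->mapping != NODE_MAPPING(sbi));
	f2fs_wait_on_page_writeback(page, NODE, true, true);
	/* ... clear the freed i_nid[] slot in the inode page ... */
	set_page_dirty(page);
	unlock_page(page);

The closing f2fs_put_page(page, 0) on line 1206 then drops the reference without unlocking, since the page was already unlocked here.
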
1211 /* caller must lock inode page */
1217 struct page *npage;
1286 struct page *f2fs_new_inode_page(struct inode *inode)
1290 /* allocate inode page for new inode */
1293 /* caller should f2fs_put_page(page, 1); */
1297 struct page *f2fs_new_node_page(struct dnode_of_data *dn, unsigned int ofs)
1301 struct page *page;
1307 page = f2fs_grab_cache_page(NODE_MAPPING(sbi), dn->nid, false);
1308 if (!page)
1334 f2fs_wait_on_page_writeback(page, NODE, true, true);
1335 fill_node_footer(page, dn->nid, dn->inode->i_ino, ofs, true);
1336 set_cold_node(page, S_ISDIR(dn->inode->i_mode));
1337 if (!PageUptodate(page))
1338 SetPageUptodate(page);
1339 if (set_page_dirty(page))
1347 return page;
1350 clear_node_page_dirty(page);
1351 f2fs_put_page(page, 1);
1357 * 0: f2fs_put_page(page, 0)
1358 * LOCKED_PAGE or error: f2fs_put_page(page, 1)
1360 static int read_node_page(struct page *page, blk_opf_t op_flags)
1362 struct f2fs_sb_info *sbi = F2FS_P_SB(page);
1369 .page = page,
1374 if (PageUptodate(page)) {
1375 if (!f2fs_inode_chksum_verify(sbi, page)) {
1376 ClearPageUptodate(page);
1382 err = f2fs_get_node_info(sbi, page->index, &ni, false);
1388 ClearPageUptodate(page);
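
The comment at 1357-1358 is read_node_page()'s locking contract: a return of 0 means the read was submitted and the page unlocked, so the caller must re-lock (or put with unlock == 0); LOCKED_PAGE (page already up to date) and errors leave the page locked, to be released with f2fs_put_page(page, 1). A hedged sketch of the caller side, mirroring __get_node_page() below:

	err = read_node_page(page, 0);
	if (err < 0) {
		f2fs_put_page(page, 1);		/* still locked on error */
		return ERR_PTR(err);
	} else if (err == LOCKED_PAGE) {
		goto page_hit;			/* up to date, still locked */
	}

	lock_page(page);			/* err == 0: re-take the lock */
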
1403 * Readahead a node page
1407 struct page *apage;
1427 static struct page *__get_node_page(struct f2fs_sb_info *sbi, pgoff_t nid,
1428 struct page *parent, int start)
1430 struct page *page;
1438 page = f2fs_grab_cache_page(NODE_MAPPING(sbi), nid, false);
1439 if (!page)
1442 err = read_node_page(page, 0);
1453 lock_page(page);
1455 if (unlikely(page->mapping != NODE_MAPPING(sbi))) {
1456 f2fs_put_page(page, 1);
1460 if (unlikely(!PageUptodate(page))) {
1465 if (!f2fs_inode_chksum_verify(sbi, page)) {
1470 if (likely(nid == nid_of_node(page)))
1471 return page;
1474 nid, nid_of_node(page), ino_of_node(page),
1475 ofs_of_node(page), cpver_of_node(page),
1476 next_blkaddr_of_node(page));
1481 ClearPageUptodate(page);
1485 f2fs_handle_page_eio(sbi, page->index, NODE);
1486 f2fs_put_page(page, 1);
1490 struct page *f2fs_get_node_page(struct f2fs_sb_info *sbi, pgoff_t nid)
1495 struct page *f2fs_get_node_page_ra(struct page *parent, int start)
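
f2fs_get_node_page() and f2fs_get_node_page_ra() are thin wrappers around __get_node_page(); on success they return a locked, up-to-date page whose nid has been validated against the node footer (line 1470). The caller-side pattern, as seen throughout this listing (e.g. lines 944-965 and 1132-1135):

	struct page *npage = f2fs_get_node_page(sbi, nid);

	if (IS_ERR(npage))
		return PTR_ERR(npage);

	/* npage is locked and up to date here; read or modify it */
	f2fs_put_page(npage, 1);	/* unlock and drop the reference */
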
1506 struct page *page;
1514 page = f2fs_pagecache_get_page(inode->i_mapping, 0,
1516 if (!page)
1519 if (!PageUptodate(page))
1522 if (!PageDirty(page))
1525 if (!clear_page_dirty_for_io(page))
1528 ret = f2fs_write_inline_data(inode, page);
1532 set_page_dirty(page);
1534 f2fs_put_page(page, 1);
1539 static struct page *last_fsync_dnode(struct f2fs_sb_info *sbi, nid_t ino)
1543 struct page *last_page = NULL;
1555 struct page *page = &fbatch.folios[i]->page;
1563 if (!IS_DNODE(page) || !is_cold_node(page))
1565 if (ino_of_node(page) != ino)
1568 lock_page(page);
1570 if (unlikely(page->mapping != NODE_MAPPING(sbi))) {
1572 unlock_page(page);
1575 if (ino_of_node(page) != ino)
1578 if (!PageDirty(page)) {
1586 get_page(page);
1587 last_page = page;
1588 unlock_page(page);
1596 static int __write_node_page(struct page *page, bool atomic, bool *submitted,
1600 struct f2fs_sb_info *sbi = F2FS_P_SB(page);
1605 .ino = ino_of_node(page),
1609 .page = page,
1617 trace_f2fs_writepage(page, NODE);
1623 ClearPageUptodate(page);
1625 unlock_page(page);
1634 IS_DNODE(page) && is_cold_node(page))
1637 /* get old block addr of this node page */
1638 nid = nid_of_node(page);
1639 f2fs_bug_on(sbi, page->index != nid);
1651 /* This page is already truncated */
1653 ClearPageUptodate(page);
1656 unlock_page(page);
1671 if (f2fs_in_warm_node_list(sbi, page)) {
1672 seq = f2fs_add_fsync_node_entry(sbi, page);
1677 set_page_writeback(page);
1681 set_node_addr(sbi, &ni, fio.new_blkaddr, is_fsync_dnode(page));
1686 f2fs_submit_merged_write_cond(sbi, NULL, page, 0, NODE);
1690 unlock_page(page);
1704 redirty_page_for_writepage(wbc, page);
1708 int f2fs_move_node_page(struct page *node_page, int gc_type)
1735 /* set page dirty and write it */
1746 static int f2fs_write_node_page(struct page *page,
1749 return __write_node_page(page, false, NULL, wbc, false,
1760 struct page *last_page = NULL;
1781 struct page *page = &fbatch.folios[i]->page;
1791 if (!IS_DNODE(page) || !is_cold_node(page))
1793 if (ino_of_node(page) != ino)
1796 lock_page(page);
1798 if (unlikely(page->mapping != NODE_MAPPING(sbi))) {
1800 unlock_page(page);
1803 if (ino_of_node(page) != ino)
1806 if (!PageDirty(page) && page != last_page) {
1811 f2fs_wait_on_page_writeback(page, NODE, true, true);
1813 set_fsync_mark(page, 0);
1814 set_dentry_mark(page, 0);
1816 if (!atomic || page == last_page) {
1817 set_fsync_mark(page, 1);
1819 if (IS_INODE(page)) {
1822 f2fs_update_inode(inode, page);
1823 set_dentry_mark(page,
1827 if (!PageDirty(page))
1828 set_page_dirty(page);
1831 if (!clear_page_dirty_for_io(page))
1834 ret = __write_node_page(page, atomic &&
1835 page == last_page,
1839 unlock_page(page);
1846 if (page == last_page) {
1847 f2fs_put_page(page, 0);
1897 static bool flush_dirty_inode(struct page *page)
1899 struct f2fs_sb_info *sbi = F2FS_P_SB(page);
1901 nid_t ino = ino_of_node(page);
1907 f2fs_update_inode(inode, page);
1908 unlock_page(page);
1928 struct page *page = &fbatch.folios[i]->page;
1930 if (!IS_INODE(page))
1933 lock_page(page);
1935 if (unlikely(page->mapping != NODE_MAPPING(sbi))) {
1937 unlock_page(page);
1941 if (!PageDirty(page)) {
1947 if (page_private_inline(page)) {
1948 clear_page_private_inline(page);
1949 unlock_page(page);
1950 flush_inline_data(sbi, ino_of_node(page));
1953 unlock_page(page);
1982 struct page *page = &fbatch.folios[i]->page;
1998 if (step == 0 && IS_DNODE(page))
2000 if (step == 1 && (!IS_DNODE(page) ||
2001 is_cold_node(page)))
2003 if (step == 2 && (!IS_DNODE(page) ||
2004 !is_cold_node(page)))
2008 lock_page(page);
2009 else if (!trylock_page(page))
2012 if (unlikely(page->mapping != NODE_MAPPING(sbi))) {
2014 unlock_page(page);
2018 if (!PageDirty(page)) {
2028 if (page_private_inline(page)) {
2029 clear_page_private_inline(page);
2030 unlock_page(page);
2031 flush_inline_data(sbi, ino_of_node(page));
2036 if (IS_INODE(page) && flush_dirty_inode(page))
2039 f2fs_wait_on_page_writeback(page, NODE, true, true);
2041 if (!clear_page_dirty_for_io(page))
2044 set_fsync_mark(page, 0);
2045 set_dentry_mark(page, 0);
2047 ret = __write_node_page(page, false, &submitted,
2050 unlock_page(page);
2086 struct page *page;
2103 page = fn->page;
2104 get_page(page);
2107 f2fs_wait_on_page_writeback(page, NODE, true, false);
2109 put_page(page);
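
Lines 2103-2109 show why the fsync waiter takes its own reference: the list entry, and the pin it took at line 337, can be freed by f2fs_del_fsync_node_entry() the moment writeback completes, so the waiter re-pins the page under the list lock before sleeping. Sketch, assuming fn was looked up under sbi->fsync_node_lock:

	page = fn->page;
	get_page(page);			/* private pin for the wait */
	spin_unlock_irqrestore(&sbi->fsync_node_lock, flags);

	f2fs_wait_on_page_writeback(page, NODE, true, false);

	put_page(page);			/* drop the private pin */
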
2164 trace_f2fs_set_page_dirty(&folio->page, NODE);
2169 if (IS_INODE(&folio->page))
2170 f2fs_inode_chksum_set(F2FS_M_SB(mapping), &folio->page);
2174 set_page_private_reference(&folio->page);
2382 struct page *nat_page, nid_t start_nid)
2505 struct page *page = get_current_nat_page(sbi, nid);
2507 if (IS_ERR(page)) {
2508 ret = PTR_ERR(page);
2510 ret = scan_nat_page(sbi, page, nid);
2511 f2fs_put_page(page, 1);
2687 int f2fs_recover_inline_xattr(struct inode *inode, struct page *page)
2691 struct page *ipage;
2698 ri = F2FS_INODE(page);
2713 src_addr = inline_xattr_addr(inode, page);
2724 int f2fs_recover_xattr_data(struct inode *inode, struct page *page)
2731 struct page *xpage;
2761 /* 3: update and set xattr node page dirty */
2762 if (page) {
2763 memcpy(F2FS_NODE(xpage), F2FS_NODE(page),
2772 int f2fs_recover_inode_page(struct f2fs_sb_info *sbi, struct page *page)
2775 nid_t ino = ino_of_node(page);
2777 struct page *ipage;
2801 src = F2FS_INODE(page);
2863 struct page *page = f2fs_get_tmp_page(sbi, idx);
2865 if (IS_ERR(page))
2866 return PTR_ERR(page);
2868 rn = F2FS_NODE(page);
2873 f2fs_put_page(page, 1);
2959 struct page *page)
2963 struct f2fs_nat_block *nat_blk = page_address(page);
3019 struct page *page = NULL;
3024 * #2, flush nat entries to the NAT page.
3033 page = get_next_nat_page(sbi, start_nid);
3034 if (IS_ERR(page))
3035 return PTR_ERR(page);
3037 nat_blk = page_address(page);
3073 update_nat_bits(sbi, start_nid, page);
3074 f2fs_put_page(page, 1);
3172 struct page *page;
3174 page = f2fs_get_meta_page(sbi, nat_bits_addr++);
3175 if (IS_ERR(page))
3176 return PTR_ERR(page);
3179 page_address(page), F2FS_BLKSIZE);
3180 f2fs_put_page(page, 1);
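
Lines 3172-3180 read the NAT-bits area one meta block at a time into a flat buffer, using the same get/IS_ERR/copy/put pattern as the NAT block read at lines 603-610. A sketch of the loop these fragments come from, assuming a destination buffer nat_bits and a block count nat_bits_blocks (names follow f2fs; treat the loop shape as illustrative):

	for (i = 0; i < nat_bits_blocks; i++) {
		struct page *page = f2fs_get_meta_page(sbi, nat_bits_addr++);

		if (IS_ERR(page))
			return PTR_ERR(page);

		memcpy(nat_bits + (i << F2FS_BLKSIZE_BITS),
		       page_address(page), F2FS_BLKSIZE);
		f2fs_put_page(page, 1);
	}
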