Lines Matching refs:page

30 * The page->private field is used to reference a struct
31 * ceph_snap_context for _every_ dirty page. This indicates which
32 * snapshot the page was logically dirtied in, and thus which snap
37 * i_wrbuffer_ref == i_wrbuffer_ref_head == the dirty page count.
57 * Invalidate and so forth must take care to ensure the dirty page
69 static inline struct ceph_snap_context *page_snap_context(struct page *page)
71 if (PagePrivate(page))
72 return (void *)page->private;
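
The matched lines above come from a small inline helper; a minimal reconstruction (the unmatched brace lines and the NULL fallback are assumptions based on the surrounding fragments) reads:

	static inline struct ceph_snap_context *page_snap_context(struct page *page)
	{
		if (PagePrivate(page))
			return (void *)page->private;
		return NULL;
	}
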
77 * Dirty a page. Optimistically adjust accounting, on the assumption
147 doutc(cl, "%llx.%llx idx %lu partial dirty page %zu~%zu\n",
154 doutc(cl, "%llx.%llx idx %lu full dirty page\n",
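
A minimal sketch of the dirtying side the comment above describes, assuming a hypothetical helper name; the real path additionally updates i_wrbuffer_ref/i_wrbuffer_ref_head under the inode's spinlock and goes through the generic dirty_folio machinery:

	#include <linux/mm.h>
	#include <linux/pagemap.h>		/* attach_page_private() */
	#include <linux/ceph/libceph.h>		/* struct ceph_snap_context */

	/* Hypothetical illustration, not the kernel's ceph_dirty_folio(). */
	static void example_mark_page_dirty(struct page *page,
					    struct ceph_snap_context *head_snapc)
	{
		/* take a reference that lives in page->private until writeback */
		struct ceph_snap_context *snapc = ceph_get_snap_context(head_snapc);

		attach_page_private(page, snapc);
		set_page_dirty(page);
	}
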
377 * instead of page arrays, and we don't have that as of yet. Once the
382 struct page **pages;
392 /* should always give us a page-aligned read */
595 struct page *page, u64 start)
603 snapc = page_snap_context(ceph_fscrypt_pagecache_page(page));
618 if (end > ceph_fscrypt_page_offset(page) + thp_size(page))
619 end = ceph_fscrypt_page_offset(page) + thp_size(page);
621 if (ret && fscrypt_is_bounce_page(page))
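
The fragments above come from the helper that computes how much of a page is actually dirty for a given snap context; a simplified sketch of the page-boundary detail it handles (the helper name and min_t() usage are illustrative, not the kernel code):

	/* Clamp a dirty range to the page it belongs to, working on the real
	 * pagecache page even when the I/O uses an fscrypt bounce page. */
	static u64 example_clamp_end_to_page(struct page *page, u64 end)
	{
		struct page *pagecache_page = fscrypt_is_bounce_page(page) ?
				fscrypt_pagecache_page(page) : page;

		return min_t(u64, end,
			     page_offset(pagecache_page) + thp_size(pagecache_page));
	}
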
627 * Write a single page, but leave the page locked.
630 * dirty page accounting (i.e., page is no longer dirty).
632 static int writepage_nounlock(struct page *page, struct writeback_control *wbc)
634 struct folio *folio = page_folio(page);
635 struct inode *inode = page->mapping->host;
640 loff_t page_off = page_offset(page);
642 loff_t len = thp_size(page);
648 struct page *bounce_page = NULL;
650 doutc(cl, "%llx.%llx page %p idx %lu\n", ceph_vinop(inode), page,
651 page->index);
657 snapc = page_snap_context(page);
659 doutc(cl, "%llx.%llx page %p not dirty?\n", ceph_vinop(inode),
660 page);
665 doutc(cl, "%llx.%llx page %p snapc %p not writeable - noop\n",
666 ceph_vinop(inode), page, snapc);
670 redirty_page_for_writepage(wbc, page);
675 /* is this a partial page at end of file? */
687 doutc(cl, "%llx.%llx page %p index %lu on %llu~%llu snapc %p seq %lld\n",
688 ceph_vinop(inode), page, page->index, page_off, wlen, snapc,
701 redirty_page_for_writepage(wbc, page);
708 set_page_writeback(page);
712 bounce_page = fscrypt_encrypt_pagecache_blocks(page,
716 redirty_page_for_writepage(wbc, page);
717 end_page_writeback(page);
724 WARN_ON_ONCE(len > thp_size(page));
726 bounce_page ? &bounce_page : &page, wlen, 0,
749 doutc(cl, "%llx.%llx interrupted page %p\n",
750 ceph_vinop(inode), page);
751 redirty_page_for_writepage(wbc, page);
752 end_page_writeback(page);
757 doutc(cl, "%llx.%llx setting page/mapping error %d %p\n",
758 ceph_vinop(inode), err, page);
762 doutc(cl, "%llx.%llx cleaned page %p\n",
763 ceph_vinop(inode), page);
766 oldest = detach_page_private(page);
768 end_page_writeback(page);
770 ceph_put_snap_context(snapc); /* page's reference */
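
A hedged sketch of the ordering rule that makes the single-page writeback path redirty some pages (function and parameter names here are illustrative): a page may only be written once its snap context is the oldest dirty context on the inode, so older snapshots reach the OSDs before newer ones.

	static bool example_page_writeable_now(struct page *page,
					       struct writeback_control *wbc,
					       struct ceph_snap_context *oldest)
	{
		struct ceph_snap_context *snapc = page_snap_context(page);

		if (!snapc)		/* raced: no longer dirty, nothing to do */
			return false;

		if (snapc->seq > oldest->seq) {
			/* dirtied in a newer snapshot: keep it dirty for a later pass */
			redirty_page_for_writepage(wbc, page);
			return false;
		}
		return true;		/* oldest context: safe to write now */
	}
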
779 static int ceph_writepage(struct page *page, struct writeback_control *wbc)
782 struct inode *inode = page->mapping->host;
788 redirty_page_for_writepage(wbc, page);
792 err = writepage_nounlock(page, wbc);
795 * to prevent caller from setting mapping/page error */
798 unlock_page(page);
807 * page error bits.
815 struct page *page;
836 * We lost the cache cap, need to truncate the page before
838 * page truncation thread, possibly losing some data that
861 page = osd_data->pages[j];
862 if (fscrypt_is_bounce_page(page)) {
863 page = fscrypt_pagecache_page(page);
865 osd_data->pages[j] = page;
867 BUG_ON(!page);
868 WARN_ON(!PageUptodate(page));
875 ceph_put_snap_context(detach_page_private(page));
876 end_page_writeback(page);
877 doutc(cl, "unlocking %p\n", page);
881 page_folio(page));
883 unlock_page(page);
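
A hedged sketch of the per-page cleanup the completion callback performs (simplified; fscache notification, error propagation, and freeing of the bounce page are left out): the snap context reference stored in page->private when the page was dirtied is finally dropped here.

	static void example_complete_one_page(struct page *page)
	{
		/* I/O may have been issued on an fscrypt bounce page; all of the
		 * page-cache state lives on the original pagecache page */
		if (fscrypt_is_bounce_page(page))
			page = fscrypt_pagecache_page(page);

		ceph_put_snap_context(detach_page_private(page));
		end_page_writeback(page);
		unlock_page(page);
	}
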
1006 struct page **pages = NULL, **data_pages;
1007 struct page *page;
1021 page = &fbatch.folios[i]->page;
1022 doutc(cl, "? %p idx %lu\n", page, page->index);
1024 lock_page(page); /* first page */
1025 else if (!trylock_page(page))
1029 if (unlikely(!PageDirty(page)) ||
1030 unlikely(page->mapping != mapping)) {
1031 doutc(cl, "!dirty or !mapping %p\n", page);
1032 unlock_page(page);
1036 pgsnapc = page_snap_context(page);
1038 doutc(cl, "page snapc %p %lld != oldest %p %lld\n",
1044 unlock_page(page);
1047 if (page_offset(page) >= ceph_wbc.i_size) {
1048 struct folio *folio = page_folio(page);
1060 if (strip_unit_end && (page->index > strip_unit_end)) {
1061 doutc(cl, "end of strip unit %p\n", page);
1062 unlock_page(page);
1065 if (PageWriteback(page)) {
1067 doutc(cl, "%p under writeback\n", page);
1068 unlock_page(page);
1071 doutc(cl, "waiting on writeback %p\n", page);
1072 wait_on_page_writeback(page);
1075 if (!clear_page_dirty_for_io(page)) {
1076 doutc(cl, "%p !clear_page_dirty_for_io\n", page);
1077 unlock_page(page);
1083 * the first locked page this time through,
1085 * allocate a page array
1093 offset = (u64)page_offset(page);
1101 strip_unit_end = page->index +
1116 } else if (page->index !=
1120 redirty_page_for_writepage(wbc, page);
1121 unlock_page(page);
1126 offset = (u64)page_offset(page);
1130 /* note position of first page in fbatch */
1131 doutc(cl, "%llx.%llx will write page %p idx %lu\n",
1132 ceph_vinop(inode), page, page->index);
1141 fscrypt_encrypt_pagecache_blocks(page,
1149 /* better not fail on first page! */
1152 redirty_page_for_writepage(wbc, page);
1153 unlock_page(page);
1158 pages[locked_pages++] = page;
1162 len += thp_size(page);
1170 /* shift unused page to beginning of fbatch */
1225 struct page *page = ceph_fscrypt_pagecache_page(pages[i]);
1227 u64 cur_offset = page_offset(page);
1229 * Discontinuity in page range? Ceph can handle that by just passing
1256 set_page_writeback(page);
1257 len += thp_size(page);
1267 u64 min_len = len + 1 - thp_size(page);
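
A hedged sketch of the batching rules visible in the loop above (names and the wsize parameter are illustrative): the first locked page fixes how far the batch may extend, and a later page only joins while it stays inside that strip unit; an index gap does not abort the batch, because, as the comment above notes, a discontinuity becomes an extra extent in the same OSD request.

	/* How many page indexes one OSD request may span, given the maximum
	 * write size (illustrative; the real code derives this from the layout). */
	static pgoff_t example_strip_unit_end(struct page *first_page, u64 wsize)
	{
		return first_page->index + (wsize >> PAGE_SHIFT) - 1;
	}

	static bool example_fits_in_batch(struct page *page, pgoff_t strip_unit_end)
	{
		/* a page past the boundary must start a new request */
		return page->index <= strip_unit_end;
	}
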
1348 struct page *page;
1357 page = &fbatch.folios[i]->page;
1358 if (page_snap_context(page) != snapc)
1360 wait_on_page_writeback(page);
1400 * @page: page being dirtied
1402 * We are only allowed to write into/dirty a page if the page is
1407 * Must be called with page lock held.
1410 ceph_find_incompatible(struct page *page)
1412 struct inode *inode = page->mapping->host;
1417 doutc(cl, " %llx.%llx page %p is shutdown\n",
1418 ceph_vinop(inode), page);
1425 wait_on_page_writeback(page);
1427 snapc = page_snap_context(page);
1432 * this page is already dirty in another (older) snap
1439 doutc(cl, " %llx.%llx page %p snapc %p not current or oldest\n",
1440 ceph_vinop(inode), page, snapc);
1445 /* yay, writeable, do it now (without dropping page lock) */
1446 doutc(cl, " %llx.%llx page %p snapc %p not current, but oldest\n",
1447 ceph_vinop(inode), page, snapc);
1448 if (clear_page_dirty_for_io(page)) {
1449 int r = writepage_nounlock(page, NULL);
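
A hedged sketch of the three cases distinguished above (simplified; the real function loops and also handles shutdown and error returns): clean or current-context pages are fine; a page dirty in an older but not oldest context forces the caller to wait for writeback; a page dirty in the oldest context is flushed immediately so the caller can reuse it without dropping the page lock.

	static struct ceph_snap_context *
	example_find_incompatible(struct page *page,
				  struct ceph_snap_context *oldest,
				  struct ceph_snap_context *head)
	{
		struct ceph_snap_context *snapc = page_snap_context(page);

		if (!snapc || snapc == head)
			return NULL;		/* clean, or dirty in the current context */

		if (snapc != oldest)		/* dirty in some mid-age snapshot */
			return ceph_get_snap_context(snapc);	/* caller waits for writeback */

		/* dirty in the oldest context: write it out now, keeping the lock */
		if (clear_page_dirty_for_io(page))
			writepage_nounlock(page, NULL);
		return NULL;
	}
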
1484 * We are only allowed to write into/dirty the page if the page is
1489 struct page **pagep, void **fsdata)
1502 *pagep = &folio->page;
1508 * except adjust dirty page accounting
1512 struct page *subpage, void *fsdata)
1629 struct page *page;
1632 page = find_or_create_page(mapping, 0,
1634 if (!page) {
1638 err = __ceph_do_getattr(inode, page,
1641 unlock_page(page);
1642 put_page(page);
1647 zero_user_segment(page, err, PAGE_SIZE);
1649 flush_dcache_page(page);
1650 SetPageUptodate(page);
1651 vmf->page = page;
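
A hedged sketch of how the fault path above publishes inline data that turned out shorter than a page (err is the number of inline bytes returned by the getattr call, as in the fragments; the helper name is illustrative):

	static void example_publish_inline_page(struct vm_fault *vmf,
						struct page *page, int err)
	{
		if (err < PAGE_SIZE)
			zero_user_segment(page, err, PAGE_SIZE);	/* zero the tail */

		flush_dcache_page(page);
		SetPageUptodate(page);
		vmf->page = page;	/* hand the still-locked page back to the fault core */
	}
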
1674 struct page *page = vmf->page;
1675 loff_t off = page_offset(page);
1692 if (off + thp_size(page) <= size)
1693 len = thp_size(page);
1695 len = offset_in_thp(page, size);
1712 /* Update time before taking page lock */
1719 lock_page(page);
1721 if (page_mkwrite_check_truncate(page, inode) < 0) {
1722 unlock_page(page);
1727 snapc = ceph_find_incompatible(page);
1729 /* success. we'll keep the page locked. */
1730 set_page_dirty(page);
1735 unlock_page(page);
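
A hedged sketch of the length computation in the mkwrite path above (helper name illustrative): only the bytes below i_size become writable when the page straddles EOF.

	static size_t example_mkwrite_len(struct page *page, loff_t i_size)
	{
		loff_t off = page_offset(page);

		if (off + thp_size(page) <= i_size)
			return thp_size(page);		/* whole page inside the file */
		return offset_in_thp(page, i_size);	/* partial page at EOF */
	}
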
1770 void ceph_fill_inline_data(struct inode *inode, struct page *locked_page,
1775 struct page *page;
1778 page = locked_page;
1782 page = find_or_create_page(mapping, 0,
1785 if (!page)
1787 if (PageUptodate(page)) {
1788 unlock_page(page);
1789 put_page(page);
1798 void *kaddr = kmap_atomic(page);
1803 if (page != locked_page) {
1805 zero_user_segment(page, len, PAGE_SIZE);
1807 flush_dcache_page(page);
1809 SetPageUptodate(page);
1810 unlock_page(page);
1811 put_page(page);
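
A hedged sketch of the copy done by ceph_fill_inline_data() above (simplified; the locked_page fast path, page allocation, and the unlock/put steps are omitted):

	static void example_fill_inline(struct page *page, const void *data, size_t len)
	{
		if (len) {
			void *kaddr = kmap_atomic(page);

			memcpy(kaddr, data, len);	/* inline payload fits in one page */
			kunmap_atomic(kaddr);
		}
		if (len < PAGE_SIZE)
			zero_user_segment(page, len, PAGE_SIZE);

		flush_dcache_page(page);
		SetPageUptodate(page);
	}
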
1825 struct page *pages[1];
1980 struct page **pages;
2079 /* one page should be large enough for STAT data */