Lines Matching defs:folio

186 struct folio *folio = page_folio(page);
193 btrfs_folio_clamp_set_ordered(fs_info, folio, start, len);
195 btrfs_folio_clamp_clear_dirty(fs_info, folio, start, len);
196 btrfs_folio_clamp_set_writeback(fs_info, folio, start, len);
199 btrfs_folio_clamp_clear_writeback(fs_info, folio, start, len);
202 btrfs_folio_end_writer_lock(fs_info, folio, start, len);
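The btrfs_folio_clamp_*() calls above all bound an arbitrary byte range to the part covered by one folio before touching the subpage state. A minimal sketch of that clamping, using only the generic folio_pos()/folio_size() API; the helper name and signature below are illustrative, not the btrfs API:

	/* Illustrative only: clamp [*start, *start + *len) to this folio. */
	static void clamp_range_to_folio(const struct folio *folio, u64 *start, u32 *len)
	{
		u64 folio_start = folio_pos(folio);
		u64 folio_end = folio_start + folio_size(folio);
		u64 range_end = *start + *len;

		*start = max(*start, folio_start);
		range_end = min(range_end, folio_end);
		*len = range_end - *start;
	}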
223 struct folio *folio = fbatch.folios[i];
225 process_one_page(fs_info, &folio->page, locked_page,
274 struct folio *folio = fbatch.folios[i];
275 struct page *page = folio_page(folio, 0);
281 if (btrfs_folio_start_writer_lock(fs_info, folio, start,
286 btrfs_folio_end_writer_lock(fs_info, folio, start,
436 struct folio *folio = page_folio(page);
442 btrfs_folio_set_uptodate(fs_info, folio, start, len);
444 btrfs_folio_clear_uptodate(fs_info, folio, start, len);
449 btrfs_subpage_end_reader(fs_info, folio, start, len);
457 * - folio_end_writeback() if there is no more pending io for the folio
472 struct folio *folio = fi.folio;
473 u64 start = folio_pos(folio) + fi.offset;
477 ASSERT(folio_order(folio) == 0);
490 folio_page(folio, 0), start, len, !error);
492 mapping_set_error(folio->mapping, error);
493 btrfs_folio_clear_writeback(fs_info, folio, start, len);
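Lines 472-493 come from the data write end-io path. A minimal sketch of that per-folio pattern using only the generic bio/folio API; the btrfs subpage bookkeeping done by btrfs_folio_clear_writeback() is collapsed into a plain folio_end_writeback() here:

	#include <linux/bio.h>
	#include <linux/pagemap.h>

	static void data_write_end_io_sketch(struct bio *bio)
	{
		int error = blk_status_to_errno(bio->bi_status);
		struct folio_iter fi;

		bio_for_each_folio_all(fi, bio) {
			struct folio *folio = fi.folio;

			if (error)
				mapping_set_error(folio->mapping, error);
			folio_end_writeback(folio);
		}
		bio_put(bio);
	}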
570 struct folio *folio = page_folio(page);
572 ASSERT(folio_test_locked(folio));
573 if (!btrfs_is_subpage(fs_info, folio->mapping))
576 ASSERT(folio_test_private(folio));
577 btrfs_subpage_start_reader(fs_info, folio, page_offset(page), PAGE_SIZE);
585 * - set the folio up to date if all extents in the tree are uptodate
587 * - unlock the folio if there are no other extents locked for it
603 struct folio *folio = fi.folio;
604 struct inode *inode = folio->mapping->host;
610 ASSERT(folio_order(folio) == 0);
618 * folio fails to read, blk_update_request() will advance
632 start = folio_pos(folio) + fi.offset;
638 pgoff_t end_index = i_size >> folio_shift(folio);
644 * Here we should only zero the range inside the folio,
649 if (folio_index(folio) == end_index && i_size <= end) {
650 u32 zero_start = max(offset_in_folio(folio, i_size),
651 offset_in_folio(folio, start));
652 u32 zero_len = offset_in_folio(folio, end) + 1 -
655 folio_zero_range(folio, zero_start, zero_len);
660 end_page_read(folio_page(folio, 0), uptodate, start, len);
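Lines 638-655 capture only part of the tail-zeroing logic in the read end-io path; stitched back together (the continuation at 653 is not shown above), the calculation is roughly:

	pgoff_t end_index = i_size >> folio_shift(folio);

	if (folio_index(folio) == end_index && i_size <= end) {
		u32 zero_start = max(offset_in_folio(folio, i_size),
				     offset_in_folio(folio, start));
		u32 zero_len = offset_in_folio(folio, end) + 1 - zero_start;

		folio_zero_range(folio, zero_start, zero_len);
	}

i.e. the zeroed span begins at whichever comes later within the folio, i_size or the start of the range just read, and runs to the end of that range.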
681 int btrfs_alloc_folio_array(unsigned int nr_folios, struct folio **folio_array,
911 struct folio *folio,
923 if (folio->mapping)
924 lockdep_assert_held(&folio->mapping->i_private_lock);
927 if (!folio_test_private(folio))
928 folio_attach_private(folio, eb);
930 WARN_ON(folio_get_private(folio) != eb);
935 if (folio_test_private(folio)) {
942 folio_attach_private(folio, prealloc);
945 ret = btrfs_attach_subpage(fs_info, folio, BTRFS_SUBPAGE_METADATA);
954 int set_folio_extent_mapped(struct folio *folio)
958 ASSERT(folio->mapping);
960 if (folio_test_private(folio))
963 fs_info = folio_to_fs_info(folio);
965 if (btrfs_is_subpage(fs_info, folio->mapping))
966 return btrfs_attach_subpage(fs_info, folio, BTRFS_SUBPAGE_DATA);
968 folio_attach_private(folio, (void *)EXTENT_FOLIO_PRIVATE);
974 struct folio *folio = page_folio(page);
979 if (!folio_test_private(folio))
984 return btrfs_detach_subpage(fs_info, folio);
986 folio_detach_private(folio);
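set_folio_extent_mapped() and its clear counterpart above are thin wrappers around the generic folio private API. A minimal sketch of that pattern, where the marker is any non-NULL value (EXTENT_FOLIO_PRIVATE in the listing); the helper names here are illustrative:

	/* Mark a folio as having extent state attached, if not already marked. */
	static void mark_folio_mapped_sketch(struct folio *folio, void *marker)
	{
		if (folio_test_private(folio))
			return;
		folio_attach_private(folio, marker);
	}

	static void unmark_folio_mapped_sketch(struct folio *folio)
	{
		if (!folio_test_private(folio))
			return;
		folio_detach_private(folio);
	}

In the subpage case the real code attaches a btrfs_subpage structure instead, via btrfs_attach_subpage()/btrfs_detach_subpage(), as the lines above show.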
1173 int btrfs_read_folio(struct file *file, struct folio *folio)
1175 struct page *page = &folio->page;
1298 struct folio *folio = page_folio(page);
1299 struct btrfs_subpage *subpage = folio_get_private(folio);
1468 struct folio *folio = page_folio(page);
1484 folio_invalidate(folio, 0, folio_size(folio));
1485 folio_unlock(folio);
1679 struct folio *folio = fi.folio;
1682 btrfs_folio_clear_writeback(fs_info, folio, start, len);
1738 struct folio *folio = eb->folios[0];
1741 folio_lock(folio);
1742 btrfs_subpage_set_writeback(fs_info, folio, eb->start, eb->len);
1743 if (btrfs_subpage_clear_and_test_dirty(fs_info, folio, eb->start,
1745 folio_clear_dirty_for_io(folio);
1748 ret = bio_add_folio(&bbio->bio, folio, eb->len,
1749 eb->start - folio_pos(folio));
1751 wbc_account_cgroup_owner(wbc, folio_page(folio, 0), eb->len);
1752 folio_unlock(folio);
1757 struct folio *folio = eb->folios[i];
1760 folio_lock(folio);
1761 folio_clear_dirty_for_io(folio);
1762 folio_start_writeback(folio);
1763 ret = bio_add_folio(&bbio->bio, folio, eb->folio_size, 0);
1765 wbc_account_cgroup_owner(wbc, folio_page(folio, 0),
1767 wbc->nr_to_write -= folio_nr_pages(folio);
1768 folio_unlock(folio);
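Lines 1757-1768 are the non-subpage metadata submission loop. Reassembled as a sketch, with nr_folios standing in for the eb's folio count (the real helper name is not captured in the listing):

	for (int i = 0; i < nr_folios; i++) {
		struct folio *folio = eb->folios[i];
		bool ret;

		folio_lock(folio);
		folio_clear_dirty_for_io(folio);
		folio_start_writeback(folio);
		ret = bio_add_folio(&bbio->bio, folio, eb->folio_size, 0);
		ASSERT(ret);
		wbc_account_cgroup_owner(wbc, folio_page(folio, 0), eb->folio_size);
		wbc->nr_to_write -= folio_nr_pages(folio);
		folio_unlock(folio);
	}

Each folio moves from dirty to writeback under its own lock before being added to the bio; the matching clear happens in the write end-io path (1679-1682).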
1791 struct folio *folio = page_folio(page);
1799 struct btrfs_subpage *subpage = folio_get_private(folio);
1809 if (!folio_test_private(folio)) {
1874 struct folio *folio = page_folio(page);
1878 if (!folio_test_private(folio))
1885 if (!folio_test_private(folio)) {
1890 eb = folio_get_private(folio);
1978 struct folio *folio = fbatch.folios[i];
1980 ret = submit_eb_page(&folio->page, &ctx);
2135 struct folio *folio = fbatch.folios[i];
2137 done_index = folio_next_index(folio);
2145 if (!folio_trylock(folio)) {
2147 folio_lock(folio);
2150 if (unlikely(folio->mapping != mapping)) {
2151 folio_unlock(folio);
2155 if (!folio_test_dirty(folio)) {
2157 folio_unlock(folio);
2162 if (folio_test_writeback(folio))
2164 folio_wait_writeback(folio);
2167 if (folio_test_writeback(folio) ||
2168 !folio_clear_dirty_for_io(folio)) {
2169 folio_unlock(folio);
2173 ret = __extent_writepage(&folio->page, bio_ctrl);
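Lines 2135-2173 follow the standard tagged-dirty writeback walk. A minimal, self-contained sketch of that walk with generic page-cache calls only; write_one_folio() is a placeholder for the btrfs __extent_writepage() hook, and the integrity-sync folio_wait_writeback() handling is omitted:

	#include <linux/pagemap.h>
	#include <linux/pagevec.h>
	#include <linux/sched.h>

	/* Placeholder: writes and unlocks the folio, like __extent_writepage(). */
	static void write_one_folio(struct folio *folio);

	static void write_dirty_range_sketch(struct address_space *mapping,
					     pgoff_t index, pgoff_t end)
	{
		struct folio_batch fbatch;
		unsigned int nr;

		folio_batch_init(&fbatch);
		while (index <= end &&
		       (nr = filemap_get_folios_tag(mapping, &index, end,
						    PAGECACHE_TAG_DIRTY, &fbatch))) {
			for (unsigned int i = 0; i < nr; i++) {
				struct folio *folio = fbatch.folios[i];

				folio_lock(folio);
				/*
				 * Re-check under the folio lock: truncation or
				 * another writer may have raced with the lookup.
				 */
				if (folio->mapping != mapping ||
				    !folio_test_dirty(folio) ||
				    folio_test_writeback(folio) ||
				    !folio_clear_dirty_for_io(folio)) {
					folio_unlock(folio);
					continue;
				}
				write_one_folio(folio);
			}
			folio_batch_release(&fbatch);
			cond_resched();
		}
	}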
2323 * ranges corresponding to the folio, and then deletes any extent state
2327 struct folio *folio, size_t offset)
2330 u64 start = folio_pos(folio);
2331 u64 end = start + folio_size(folio) - 1;
2332 size_t blocksize = folio_to_fs_info(folio)->sectorsize;
2342 folio_wait_writeback(folio);
2809 * into the folio where our eb exists, and if we update ->start after
2811 * different offset in the folio than where we originally copied into.
3355 static bool folio_range_has_eb(struct btrfs_fs_info *fs_info, struct folio *folio)
3359 lockdep_assert_held(&folio->mapping->i_private_lock);
3361 if (folio_test_private(folio)) {
3362 subpage = folio_get_private(folio);
3375 static void detach_extent_buffer_folio(struct extent_buffer *eb, struct folio *folio)
3381 * For mapped eb, we're going to change the folio private, which should
3385 spin_lock(&folio->mapping->i_private_lock);
3387 if (!folio_test_private(folio)) {
3389 spin_unlock(&folio->mapping->i_private_lock);
3398 * only clear folio if it's still connected to
3401 if (folio_test_private(folio) && folio_get_private(folio) == eb) {
3403 BUG_ON(folio_test_dirty(folio));
3404 BUG_ON(folio_test_writeback(folio));
3406 folio_detach_private(folio);
3409 spin_unlock(&folio->mapping->i_private_lock);
3414 * For subpage, we can have dummy eb with folio private attached. In
3415 * this case, we can directly detach the private as such folio is only
3419 btrfs_detach_subpage(fs_info, folio);
3423 btrfs_folio_dec_eb_refs(fs_info, folio);
3426 * We can only detach the folio private if there are no other ebs in the
3429 if (!folio_range_has_eb(fs_info, folio))
3430 btrfs_detach_subpage(fs_info, folio);
3432 spin_unlock(&folio->mapping->i_private_lock);
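Lines 3385-3409 show the non-subpage half of detach_extent_buffer_folio(). Stitched together, the key pattern is that folio private is only dropped under the mapping's i_private_lock, and only if it still points at this eb (another buffer may already own the folio):

	spin_lock(&folio->mapping->i_private_lock);
	if (folio_test_private(folio) && folio_get_private(folio) == eb) {
		BUG_ON(folio_test_dirty(folio));
		BUG_ON(folio_test_writeback(folio));
		/* Only clear the private if it is still connected to this eb. */
		folio_detach_private(folio);
	}
	spin_unlock(&folio->mapping->i_private_lock);

(The caller then drops the folio reference taken at allocation time, per 3448-3449.)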
3441 struct folio *folio = eb->folios[i];
3443 if (!folio)
3446 detach_extent_buffer_folio(eb, folio);
3448 /* One for when we allocated the folio. */
3449 folio_put(folio);
3509 struct folio *folio = new->folios[i];
3512 ret = attach_extent_buffer_folio(new, folio, NULL);
3517 WARN_ON(folio_test_dirty(folio));
3689 struct folio *folio = page_folio(page);
3701 if (!folio_test_private(folio))
3708 * just overwrite folio private.
3710 exists = folio_get_private(folio);
3715 folio_detach_private(folio);
3754 * Return -EAGAIN if the filemap has an existing folio but with different size
3765 struct folio *existing_folio;
3770 /* Caller should ensure the folio exists. */
3812 /* The extent buffer no longer exists, we can reuse the folio. */
3866 * Preallocate folio private for subpage case, so that we won't
3891 struct folio *folio;
3905 * - the new eb is using higher order folio
3909 * have higher order folio for the call.
3929 folio = eb->folios[i];
3930 eb->folio_size = folio_size(folio);
3931 eb->folio_shift = folio_shift(folio);
3934 ret = attach_extent_buffer_folio(eb, folio, prealloc);
3938 * detach_extent_buffer_folio() won't release the folio private
3945 btrfs_folio_inc_eb_refs(fs_info, folio);
3948 WARN_ON(btrfs_folio_test_dirty(fs_info, folio, eb->start, eb->len));
3953 * At this stage, either we allocated a large folio, thus @i
3956 if (i && folio_page(eb->folios[i - 1], 0) + 1 != folio_page(folio, 0))
3959 if (!btrfs_folio_test_uptodate(fs_info, folio, eb->start, eb->len))
4012 * then attaching our eb to that folio. If we fail to insert our folio
4013 * we'll lookup the folio for that index, and grab that EB. We do not
4133 static void btree_clear_folio_dirty(struct folio *folio)
4135 ASSERT(folio_test_dirty(folio));
4136 ASSERT(folio_test_locked(folio));
4137 folio_clear_dirty_for_io(folio);
4138 xa_lock_irq(&folio->mapping->i_pages);
4139 if (!folio_test_dirty(folio))
4140 __xa_clear_mark(&folio->mapping->i_pages,
4141 folio_index(folio), PAGECACHE_TAG_DIRTY);
4142 xa_unlock_irq(&folio->mapping->i_pages);
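Stitched back into one piece, the helper at 4133-4142 clears the folio dirty bit and then, under the page-cache xarray lock, drops the PAGECACHE_TAG_DIRTY mark so the writeback tag walk no longer finds the folio:

	static void btree_clear_folio_dirty(struct folio *folio)
	{
		ASSERT(folio_test_dirty(folio));
		ASSERT(folio_test_locked(folio));
		folio_clear_dirty_for_io(folio);

		xa_lock_irq(&folio->mapping->i_pages);
		if (!folio_test_dirty(folio))
			__xa_clear_mark(&folio->mapping->i_pages,
					folio_index(folio), PAGECACHE_TAG_DIRTY);
		xa_unlock_irq(&folio->mapping->i_pages);
	}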
4148 struct folio *folio = eb->folios[0];
4152 folio_lock(folio);
4153 last = btrfs_subpage_clear_and_test_dirty(fs_info, folio, eb->start, eb->len);
4155 btree_clear_folio_dirty(folio);
4156 folio_unlock(folio);
4196 struct folio *folio = eb->folios[i];
4198 if (!folio_test_dirty(folio))
4200 folio_lock(folio);
4201 btree_clear_folio_dirty(folio);
4202 folio_unlock(folio);
4259 struct folio *folio = eb->folios[i];
4261 if (!folio)
4269 folio_clear_uptodate(folio);
4271 btrfs_subpage_clear_uptodate(fs_info, folio,
4283 struct folio *folio = eb->folios[i];
4290 folio_mark_uptodate(folio);
4292 btrfs_subpage_set_uptodate(fs_info, folio,
4333 struct folio *folio = fi.folio;
4338 btrfs_folio_set_uptodate(fs_info, folio, start, len);
4340 btrfs_folio_clear_uptodate(fs_info, folio, start, len);
4403 struct folio *folio = eb->folios[i];
4405 ret = bio_add_folio(&bbio->bio, folio, eb->folio_size, 0);
4574 struct folio *folio = eb->folios[i];
4576 ASSERT(folio);
4590 struct folio *folio = eb->folios[0];
4593 if (WARN_ON(!btrfs_subpage_test_uptodate(fs_info, folio,
4595 btrfs_subpage_dump_bitmap(fs_info, folio, eb->start, eb->len);
4597 WARN_ON(!folio_test_uptodate(folio));
4738 * Calculate the folio and offset of the byte containing the given bit number.
4743 * @folio_index: return index of the folio in the extent buffer that contains
4745 * @folio_offset: return offset into the folio given by folio_index
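A minimal sketch of that calculation, assuming the byte layout implied elsewhere in the listing: @start is a byte offset inside the eb, @nr is the bit number, the eb's data begins at offset_in_folio(eb->folios[0], eb->start) within its first folio, and eb->folio_size/eb->folio_shift (see 3930-3931) describe each folio. Names not present in the listing are illustrative:

	size_t byte_offset = start + (nr / BITS_PER_BYTE) +
			     offset_in_folio(eb->folios[0], eb->start);

	*folio_index = byte_offset >> eb->folio_shift;
	*folio_offset = byte_offset & (eb->folio_size - 1);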
5013 * Unlike try_release_extent_buffer() which uses folio private
5053 * check the folio private at the end. And
5059 * Finally, check if we have cleared folio private, as if we have
5060 * released all ebs in the page, the folio private should be cleared now.
5074 struct folio *folio = page_folio(page);
5081 * We need to make sure nobody is changing folio private, as we rely on
5082 * folio private as the pointer to extent buffer.
5085 if (!folio_test_private(folio)) {
5090 eb = folio_get_private(folio);