Lines Matching defs:subpage

6 #include "subpage.h"
63 * Thus for metadata locking, subpage support relies on io_tree locking only.
74 * mapping. And if page->mapping->host is a data inode, it's subpage.
81 * Now the only remaining case is metadata, for which we only use the subpage
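
The two comment fragments above describe the subpage decision: pages without a mapping (or backed by a data inode) follow sectorsize, while metadata follows nodesize. A minimal sketch of that check, modeled on btrfs_is_subpage() (the exact signature varies across kernel versions; treat the body as illustrative, not verbatim):

static bool is_subpage_sketch(const struct btrfs_fs_info *fs_info,
                              struct page *page)
{
        if (fs_info->sectorsize >= PAGE_SIZE)
                return false;

        /*
         * Only data pages (e.g. direct IO or compression) can have no
         * mapping; and if page->mapping->host is a data inode, it's
         * subpage, as the sectorsize >= PAGE_SIZE case is ruled out above.
         */
        if (!page->mapping || !page->mapping->host ||
            is_data_inode(page->mapping->host))
                return true;

        /* The only remaining case is metadata: subpage iff nodesize < PAGE_SIZE. */
        return fs_info->nodesize < PAGE_SIZE;
}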
123 struct btrfs_subpage *subpage;
132 /* Either not subpage, or the folio already has private attached. */
136 subpage = btrfs_alloc_subpage(fs_info, type);
137 if (IS_ERR(subpage))
138 return PTR_ERR(subpage);
140 folio_attach_private(folio, subpage);
146 struct btrfs_subpage *subpage;
148 /* Either not subpage, or the folio already has private attached. */
152 subpage = folio_detach_private(folio);
153 ASSERT(subpage);
154 btrfs_free_subpage(subpage);
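
Read together, the attach and detach fragments above pair up as below; a condensed sketch assuming the usual guards (btrfs_is_subpage() and folio_test_private()) that sit on the elided lines:

static int attach_sketch(const struct btrfs_fs_info *fs_info,
                         struct folio *folio, enum btrfs_subpage_type type)
{
        struct btrfs_subpage *subpage;

        /* Either not subpage, or the folio already has private attached. */
        if (!btrfs_is_subpage(fs_info, folio_page(folio, 0)) ||
            folio_test_private(folio))
                return 0;

        subpage = btrfs_alloc_subpage(fs_info, type);
        if (IS_ERR(subpage))
                return PTR_ERR(subpage);
        folio_attach_private(folio, subpage);
        return 0;
}

static void detach_sketch(const struct btrfs_fs_info *fs_info,
                          struct folio *folio)
{
        struct btrfs_subpage *subpage;

        /* Either not subpage, or the private is already detached. */
        if (!btrfs_is_subpage(fs_info, folio_page(folio, 0)) ||
            !folio_test_private(folio))
                return;

        subpage = folio_detach_private(folio);
        ASSERT(subpage);
        btrfs_free_subpage(subpage);
}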
181 void btrfs_free_subpage(struct btrfs_subpage *subpage)
183 kfree(subpage);
187 * Increase the eb_refs of the current subpage.
197 struct btrfs_subpage *subpage;
205 subpage = folio_get_private(folio);
206 atomic_inc(&subpage->eb_refs);
211 struct btrfs_subpage *subpage;
219 subpage = folio_get_private(folio);
220 ASSERT(atomic_read(&subpage->eb_refs));
221 atomic_dec(&subpage->eb_refs);
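
The eb_refs pair above prevents a race between allocating an extent buffer and the last extent buffer in the same page being freed: the count is raised before the eb is inserted, so detaching the folio private cannot happen underneath. A sketch of the inc/dec pair (the mapping private_lock assertion of the real helpers is omitted):

static void inc_eb_refs_sketch(const struct btrfs_fs_info *fs_info,
                               struct folio *folio)
{
        struct btrfs_subpage *subpage;

        if (!btrfs_is_subpage(fs_info, folio_page(folio, 0)))
                return;

        ASSERT(folio_test_private(folio) && folio->mapping);
        subpage = folio_get_private(folio);
        atomic_inc(&subpage->eb_refs);
}

static void dec_eb_refs_sketch(const struct btrfs_fs_info *fs_info,
                               struct folio *folio)
{
        struct btrfs_subpage *subpage;

        if (!btrfs_is_subpage(fs_info, folio_page(folio, 0)))
                return;

        ASSERT(folio_test_private(folio) && folio->mapping);
        subpage = folio_get_private(folio);
        ASSERT(atomic_read(&subpage->eb_refs));
        atomic_dec(&subpage->eb_refs);
}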
227 /* For subpage support, the folio must be a single page. */
256 struct btrfs_subpage *subpage = folio_get_private(folio);
264 spin_lock_irqsave(&subpage->lock, flags);
267 * locked the subpage range.
269 ASSERT(bitmap_test_range_all_zero(subpage->bitmaps, start_bit, nbits));
270 bitmap_set(subpage->bitmaps, start_bit, nbits);
271 atomic_add(nbits, &subpage->readers);
272 spin_unlock_irqrestore(&subpage->lock, flags);
278 struct btrfs_subpage *subpage = folio_get_private(folio);
288 spin_lock_irqsave(&subpage->lock, flags);
291 ASSERT(bitmap_test_range_all_set(subpage->bitmaps, start_bit, nbits));
292 ASSERT(atomic_read(&subpage->readers) >= nbits);
294 bitmap_clear(subpage->bitmaps, start_bit, nbits);
295 last = atomic_sub_and_test(nbits, &subpage->readers);
306 spin_unlock_irqrestore(&subpage->lock, flags);
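
The reader fragments above implement per-sector read locking: start sets the range in the locked bitmap and bumps subpage::readers; end clears the range and reports whether the last reader left, at which point the caller unlocks the folio. Assembled into a sketch (the start_bit/nbits derivation from @start/@len is elided):

static void start_reader_sketch(struct btrfs_subpage *subpage,
                                unsigned int start_bit, unsigned int nbits)
{
        unsigned long flags;

        spin_lock_irqsave(&subpage->lock, flags);
        /* Even for reading, no one should have locked the subpage range. */
        ASSERT(bitmap_test_range_all_zero(subpage->bitmaps, start_bit, nbits));
        bitmap_set(subpage->bitmaps, start_bit, nbits);
        atomic_add(nbits, &subpage->readers);
        spin_unlock_irqrestore(&subpage->lock, flags);
}

static bool end_reader_sketch(struct btrfs_subpage *subpage,
                              unsigned int start_bit, unsigned int nbits)
{
        unsigned long flags;
        bool last;

        spin_lock_irqsave(&subpage->lock, flags);
        ASSERT(bitmap_test_range_all_set(subpage->bitmaps, start_bit, nbits));
        ASSERT(atomic_read(&subpage->readers) >= nbits);
        bitmap_clear(subpage->bitmaps, start_bit, nbits);
        last = atomic_sub_and_test(nbits, &subpage->readers);
        spin_unlock_irqrestore(&subpage->lock, flags);
        return last;    /* Caller unlocks the folio when true. */
}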
317 * beyond the target range. In that case, just set @len to 0; the subpage
330 struct btrfs_subpage *subpage = folio_get_private(folio);
338 spin_lock_irqsave(&subpage->lock, flags);
339 ASSERT(atomic_read(&subpage->readers) == 0);
340 ASSERT(bitmap_test_range_all_zero(subpage->bitmaps, start_bit, nbits));
341 bitmap_set(subpage->bitmaps, start_bit, nbits);
342 ret = atomic_add_return(nbits, &subpage->writers);
344 spin_unlock_irqrestore(&subpage->lock, flags);
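
The clamp comment above (file line 317) refers to trimming a caller's range to the folio before the writer accounting runs. A sketch of that clamping, reconstructed from the fragment (helper name is hypothetical):

static void clamp_range_sketch(struct folio *folio, u64 *start, u32 *len)
{
        u64 orig_start = *start;
        u32 orig_len = *len;

        *start = max_t(u64, folio_pos(folio), orig_start);
        /*
         * Some callers may pass a page entirely beyond the target range;
         * in that case @len simply becomes 0, and the subpage helpers
         * handle @len == 0 without any problem.
         */
        if (folio_pos(folio) >= orig_start + orig_len)
                *len = 0;
        else
                *len = min_t(u64, folio_pos(folio) + PAGE_SIZE,
                             orig_start + orig_len) - *start;
}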
350 struct btrfs_subpage *subpage = folio_get_private(folio);
358 spin_lock_irqsave(&subpage->lock, flags);
364 * subpage::writers is 0. Handle them in a special way.
366 if (atomic_read(&subpage->writers) == 0) {
367 spin_unlock_irqrestore(&subpage->lock, flags);
371 ASSERT(atomic_read(&subpage->writers) >= nbits);
373 ASSERT(bitmap_test_range_all_set(subpage->bitmaps, start_bit, nbits));
374 bitmap_clear(subpage->bitmaps, start_bit, nbits);
375 last = atomic_sub_and_test(nbits, &subpage->writers);
376 spin_unlock_irqrestore(&subpage->lock, flags);
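
The writer-end fragments above contain a special case: a page locked by plain lock_page() (so its subpage::writers is 0, as in the compression path) must report "last" immediately so the caller unlocks it directly. Assembled into a sketch:

static bool end_and_test_writer_sketch(struct btrfs_subpage *subpage,
                                       unsigned int start_bit,
                                       unsigned int nbits)
{
        unsigned long flags;
        bool last;

        spin_lock_irqsave(&subpage->lock, flags);
        /*
         * A page locked by plain lock_page() has subpage::writers == 0;
         * handle it in a special way and let the caller unlock directly.
         */
        if (atomic_read(&subpage->writers) == 0) {
                spin_unlock_irqrestore(&subpage->lock, flags);
                return true;
        }

        ASSERT(atomic_read(&subpage->writers) >= nbits);
        ASSERT(bitmap_test_range_all_set(subpage->bitmaps, start_bit, nbits));
        bitmap_clear(subpage->bitmaps, start_bit, nbits);
        last = atomic_sub_and_test(nbits, &subpage->writers);
        spin_unlock_irqrestore(&subpage->lock, flags);
        return last;
}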
419 #define subpage_test_bitmap_all_set(fs_info, subpage, name) \
420 bitmap_test_range_all_set(subpage->bitmaps, \
424 #define subpage_test_bitmap_all_zero(fs_info, subpage, name) \
425 bitmap_test_range_all_zero(subpage->bitmaps, \
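
Both helper macros rely on token pasting: name##_offset on the elided continuation lines selects the per-flag region inside the shared subpage->bitmaps. For example, with name == uptodate the all-set test expands roughly to:

bitmap_test_range_all_set(subpage->bitmaps,
                          fs_info->subpage_info->uptodate_offset,
                          fs_info->subpage_info->bitmap_nr_bits);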
432 struct btrfs_subpage *subpage = folio_get_private(folio);
437 spin_lock_irqsave(&subpage->lock, flags);
438 bitmap_set(subpage->bitmaps, start_bit, len >> fs_info->sectorsize_bits);
439 if (subpage_test_bitmap_all_set(fs_info, subpage, uptodate))
441 spin_unlock_irqrestore(&subpage->lock, flags);
447 struct btrfs_subpage *subpage = folio_get_private(folio);
452 spin_lock_irqsave(&subpage->lock, flags);
453 bitmap_clear(subpage->bitmaps, start_bit, len >> fs_info->sectorsize_bits);
455 spin_unlock_irqrestore(&subpage->lock, flags);
461 struct btrfs_subpage *subpage = folio_get_private(folio);
466 spin_lock_irqsave(&subpage->lock, flags);
467 bitmap_set(subpage->bitmaps, start_bit, len >> fs_info->sectorsize_bits);
468 spin_unlock_irqrestore(&subpage->lock, flags);
473 * Extra clear_and_test function for subpage dirty bitmap.
485 struct btrfs_subpage *subpage = folio_get_private(folio);
491 spin_lock_irqsave(&subpage->lock, flags);
492 bitmap_clear(subpage->bitmaps, start_bit, len >> fs_info->sectorsize_bits);
493 if (subpage_test_bitmap_all_zero(fs_info, subpage, dirty))
495 spin_unlock_irqrestore(&subpage->lock, flags);
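
The clear_and_test variant above returns true only when the last dirty bit in the folio was cleared; callers then clear the folio-level dirty flag themselves, since tree blocks need extra handling. A usage sketch, assuming the btrfs_subpage_clear_and_test_dirty() helper this file exports (wrapper name is hypothetical):

static void clear_dirty_sketch(const struct btrfs_fs_info *fs_info,
                               struct folio *folio, u64 start, u32 len)
{
        bool last;

        last = btrfs_subpage_clear_and_test_dirty(fs_info, folio, start, len);
        if (last)
                folio_clear_dirty_for_io(folio);
}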
512 struct btrfs_subpage *subpage = folio_get_private(folio);
517 spin_lock_irqsave(&subpage->lock, flags);
518 bitmap_set(subpage->bitmaps, start_bit, len >> fs_info->sectorsize_bits);
521 spin_unlock_irqrestore(&subpage->lock, flags);
527 struct btrfs_subpage *subpage = folio_get_private(folio);
532 spin_lock_irqsave(&subpage->lock, flags);
533 bitmap_clear(subpage->bitmaps, start_bit, len >> fs_info->sectorsize_bits);
534 if (subpage_test_bitmap_all_zero(fs_info, subpage, writeback)) {
538 spin_unlock_irqrestore(&subpage->lock, flags);
544 struct btrfs_subpage *subpage = folio_get_private(folio);
549 spin_lock_irqsave(&subpage->lock, flags);
550 bitmap_set(subpage->bitmaps, start_bit, len >> fs_info->sectorsize_bits);
552 spin_unlock_irqrestore(&subpage->lock, flags);
558 struct btrfs_subpage *subpage = folio_get_private(folio);
563 spin_lock_irqsave(&subpage->lock, flags);
564 bitmap_clear(subpage->bitmaps, start_bit, len >> fs_info->sectorsize_bits);
565 if (subpage_test_bitmap_all_zero(fs_info, subpage, ordered))
567 spin_unlock_irqrestore(&subpage->lock, flags);
573 struct btrfs_subpage *subpage = folio_get_private(folio);
578 spin_lock_irqsave(&subpage->lock, flags);
579 bitmap_set(subpage->bitmaps, start_bit, len >> fs_info->sectorsize_bits);
580 if (subpage_test_bitmap_all_set(fs_info, subpage, checked))
582 spin_unlock_irqrestore(&subpage->lock, flags);
588 struct btrfs_subpage *subpage = folio_get_private(folio);
593 spin_lock_irqsave(&subpage->lock, flags);
594 bitmap_clear(subpage->bitmaps, start_bit, len >> fs_info->sectorsize_bits);
596 spin_unlock_irqrestore(&subpage->lock, flags);
607 struct btrfs_subpage *subpage = folio_get_private(folio); \
613 spin_lock_irqsave(&subpage->lock, flags); \
614 ret = bitmap_test_range_all_set(subpage->bitmaps, start_bit, \
616 spin_unlock_irqrestore(&subpage->lock, flags); \
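
These lines come from a macro that stamps out one test/set/clear helper per flag. Expanded for name == uptodate, the generated test helper looks roughly like this (subpage_calc_start_bit() is the file-internal macro that maps @start/@len to a bit range in the shared bitmap):

bool btrfs_subpage_test_uptodate(const struct btrfs_fs_info *fs_info,
                                 struct folio *folio, u64 start, u32 len)
{
        struct btrfs_subpage *subpage = folio_get_private(folio);
        unsigned int start_bit = subpage_calc_start_bit(fs_info, folio,
                                                        uptodate, start, len);
        unsigned long flags;
        bool ret;

        spin_lock_irqsave(&subpage->lock, flags);
        ret = bitmap_test_range_all_set(subpage->bitmaps, start_bit,
                                        len >> fs_info->sectorsize_bits);
        spin_unlock_irqrestore(&subpage->lock, flags);
        return ret;
}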
703 * Make sure not only the page dirty bit is cleared, but also the subpage dirty bit
708 struct btrfs_subpage *subpage = folio_get_private(folio);
718 ASSERT(subpage_test_bitmap_all_zero(fs_info, subpage, dirty));
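
A sketch of how the page-level and subpage-level dirty checks combine in the assertion above (compiled out unless CONFIG_BTRFS_ASSERT is set; helper name is hypothetical):

static void assert_not_dirty_sketch(const struct btrfs_fs_info *fs_info,
                                    struct folio *folio)
{
        struct btrfs_subpage *subpage = folio_get_private(folio);

        if (!IS_ENABLED(CONFIG_BTRFS_ASSERT))
                return;

        ASSERT(!folio_test_dirty(folio));
        if (!btrfs_is_subpage(fs_info, folio_page(folio, 0)))
                return;

        ASSERT(folio_test_private(folio) && subpage);
        ASSERT(subpage_test_bitmap_all_zero(fs_info, subpage, dirty));
}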
725 * Its subpage::writers count should be zero.
734 * In this case, we have to call the subpage helper to handle it.
739 struct btrfs_subpage *subpage;
742 /* For the non-subpage case, we just unlock the page. */
749 subpage = folio_get_private(folio);
752 * For the subpage case, there are two types of locked pages: with or
755 * Since we own the page lock, no one else can touch subpage::writers
758 if (atomic_read(&subpage->writers) == 0) {
764 /* Have writers, use the proper subpage helper to end it. */
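
Assembled, the unlock path above distinguishes the two locked-page types; a sketch assuming btrfs_folio_end_writer_lock() (this file's writer-lock teardown helper) for the counted case:

static void unlock_writer_sketch(struct btrfs_fs_info *fs_info,
                                 struct folio *folio, u64 start, u32 len)
{
        struct btrfs_subpage *subpage;

        ASSERT(folio_test_locked(folio));
        /* For the non-subpage case, we just unlock the page. */
        if (!btrfs_is_subpage(fs_info, folio_page(folio, 0))) {
                folio_unlock(folio);
                return;
        }

        subpage = folio_get_private(folio);
        /*
         * We own the page lock, so no one else can touch subpage::writers;
         * reading it without the spinlock is safe here.
         */
        if (atomic_read(&subpage->writers) == 0) {
                /* No writers: locked by plain lock_page(). */
                folio_unlock(folio);
                return;
        }

        /* Have writers, use the proper subpage helper to end it. */
        btrfs_folio_end_writer_lock(fs_info, folio, start, len);
}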
768 #define GET_SUBPAGE_BITMAP(subpage, subpage_info, name, dst) \
769 bitmap_cut(dst, subpage->bitmaps, 0, \
776 struct btrfs_subpage *subpage;
787 subpage = folio_get_private(folio);
789 spin_lock_irqsave(&subpage->lock, flags);
790 GET_SUBPAGE_BITMAP(subpage, subpage_info, uptodate, &uptodate_bitmap);
791 GET_SUBPAGE_BITMAP(subpage, subpage_info, dirty, &dirty_bitmap);
792 GET_SUBPAGE_BITMAP(subpage, subpage_info, writeback, &writeback_bitmap);
793 GET_SUBPAGE_BITMAP(subpage, subpage_info, ordered, &ordered_bitmap);
794 GET_SUBPAGE_BITMAP(subpage, subpage_info, checked, &checked_bitmap);
795 GET_SUBPAGE_BITMAP(subpage, subpage_info, locked, &locked_bitmap);
796 spin_unlock_irqrestore(&subpage->lock, flags);
798 dump_page(folio_page(folio, 0), "btrfs subpage dump");
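
The dump helper is a debugging aid: it snapshots each per-flag bitmap under subpage->lock, then prints them alongside dump_page(). A hypothetical call site (the condition and the use of the generated btrfs_subpage_test_dirty() helper are for illustration only):

        /* On unexpected leftover dirty sectors, dump the per-sector state. */
        if (unlikely(btrfs_subpage_test_dirty(fs_info, folio, start, len))) {
                btrfs_subpage_dump_bitmap(fs_info, folio, start, len);
                ASSERT(0);
        }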