Lines matching refs:fbatch (all matches are in mm/filemap.c)

276 * @fbatch: batch of folios to delete
279 * @fbatch from the mapping. The function expects @fbatch to be sorted
281 * It tolerates holes in @fbatch (mapping entries at those indices are not
287 struct folio_batch *fbatch)
289 XA_STATE(xas, &mapping->i_pages, fbatch->folios[0]->index);
296 if (i >= folio_batch_count(fbatch))
309 if (folio != fbatch->folios[i]) {
311 fbatch->folios[i]->index, folio);
328 struct folio_batch *fbatch)
332 if (!folio_batch_count(fbatch))
337 for (i = 0; i < folio_batch_count(fbatch); i++) {
338 struct folio *folio = fbatch->folios[i];
343 page_cache_delete_batch(mapping, fbatch);
349 for (i = 0; i < folio_batch_count(fbatch); i++)
350 filemap_free_folio(mapping, fbatch->folios[i]);
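
The fragments above (lines 276-350) cover page_cache_delete_batch() and its wrapper delete_from_page_cache_batch(): one pass over the xarray clears every slot, then each folio's page-cache reference is freed via filemap_free_folio(). A minimal caller sketch, modeled on the truncation path; the helper name is hypothetical, and it assumes the batch is sorted by index with every folio locked, as the kernel-doc above requires:

static void drop_locked_batch(struct address_space *mapping,
                              struct folio_batch *fbatch)
{
        unsigned int i;

        /* Hypothetical helper: batch sorted by index, folios locked. */
        delete_from_page_cache_batch(mapping, fbatch);
        for (i = 0; i < folio_batch_count(fbatch); i++)
                folio_unlock(fbatch->folios[i]);
        folio_batch_release(fbatch);    /* drop the lookup references */
}
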
515 struct folio_batch fbatch;
518 folio_batch_init(&fbatch);
524 PAGECACHE_TAG_WRITEBACK, &fbatch);
530 struct folio *folio = fbatch.folios[i];
535 folio_batch_release(&fbatch);
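
Lines 515-535 are the writeback-wait loop in __filemap_fdatawait_range(), and they show the canonical consumer shape: initialise a batch on the stack, refill it until the tagged lookup comes back empty, process each folio, release. A condensed sketch of that loop (error-flag handling omitted; the function name is made up):

static void wait_range_writeback(struct address_space *mapping,
                                 pgoff_t index, pgoff_t end)
{
        struct folio_batch fbatch;

        folio_batch_init(&fbatch);
        while (index <= end) {
                unsigned int i, nr;

                nr = filemap_get_folios_tag(mapping, &index, end,
                                PAGECACHE_TAG_WRITEBACK, &fbatch);
                if (!nr)
                        break;
                for (i = 0; i < nr; i++)
                        folio_wait_writeback(fbatch.folios[i]);
                folio_batch_release(&fbatch);
                cond_resched();
        }
}
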
2023 * @fbatch: Where the resulting entries are placed.
2027 * the mapping. The entries are placed in @fbatch. find_get_entries()
2039 pgoff_t end, struct folio_batch *fbatch, pgoff_t *indices)
2046 indices[fbatch->nr] = xas.xa_index;
2047 if (!folio_batch_add(fbatch, folio))
2052 if (folio_batch_count(fbatch)) {
2054 int idx = folio_batch_count(fbatch) - 1;
2056 folio = fbatch->folios[idx];
2061 return folio_batch_count(fbatch);
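
find_get_entries() (lines 2023-2061) is the lookup that also returns value entries (shadow, swap and DAX entries), which is why it takes the separate @indices array: a value entry does not record its own index, so line 2046 saves it out-of-band. A hedged consumer sketch, loosely following the truncation code; scan_entries() is a made-up name, and folios are put by hand because value entries in the batch carry no reference:

static void scan_entries(struct address_space *mapping,
                         pgoff_t start, pgoff_t end)
{
        struct folio_batch fbatch;
        pgoff_t indices[PAGEVEC_SIZE];
        unsigned int i;

        folio_batch_init(&fbatch);
        while (find_get_entries(mapping, &start, end, &fbatch, indices)) {
                for (i = 0; i < folio_batch_count(&fbatch); i++) {
                        struct folio *folio = fbatch.folios[i];

                        if (xa_is_value(folio))
                                continue;   /* value entry at indices[i] */
                        /* inspect the folio, then drop our reference */
                        folio_put(folio);
                }
                folio_batch_init(&fbatch);  /* references already dropped */
        }
}
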
2069 * @fbatch: Where the resulting entries are placed.
2070 * @indices: The cache indices of the entries in @fbatch.
2085 pgoff_t end, struct folio_batch *fbatch, pgoff_t *indices)
2105 indices[fbatch->nr] = xas.xa_index;
2106 if (!folio_batch_add(fbatch, folio))
2116 if (folio_batch_count(fbatch)) {
2118 int idx = folio_batch_count(fbatch) - 1;
2120 folio = fbatch->folios[idx];
2125 return folio_batch_count(fbatch);
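
find_lock_entries() (lines 2069-2125) has the same shape and the same @indices contract, with two differences the caller must honour: each folio comes back locked as well as referenced, and folios straddling @start or @end are skipped so the caller can deal with partial edge folios separately (as truncation does). The consumer loop therefore gains an unlock; reusing the declarations from the sketch above:

        while (find_lock_entries(mapping, &start, end, &fbatch, indices)) {
                for (i = 0; i < folio_batch_count(&fbatch); i++) {
                        struct folio *folio = fbatch.folios[i];

                        if (xa_is_value(folio))
                                continue;
                        /* folio is locked and entirely inside the range */
                        folio_unlock(folio);
                        folio_put(folio);
                }
                folio_batch_init(&fbatch);
        }
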
2133 * @fbatch: The batch to fill.
2137 * in @fbatch with an elevated reference count.
2143 pgoff_t end, struct folio_batch *fbatch)
2145 return filemap_get_folios_tag(mapping, start, end, XA_PRESENT, fbatch);
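
filemap_get_folios() (lines 2133-2145) is simply the tagged lookup specialised to XA_PRESENT, as line 2145 shows. Because @start is advanced past the last folio returned, the natural consumer is a drain loop; a minimal sketch with a hypothetical helper name and folio_mark_accessed() standing in for real per-folio work:

static void for_each_cached_folio(struct address_space *mapping,
                                  pgoff_t start, pgoff_t end)
{
        struct folio_batch fbatch;
        unsigned int i;

        folio_batch_init(&fbatch);
        while (filemap_get_folios(mapping, &start, end, &fbatch)) {
                for (i = 0; i < folio_batch_count(&fbatch); i++)
                        folio_mark_accessed(fbatch.folios[i]);
                folio_batch_release(&fbatch);
        }
}
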
2154 * @fbatch: The batch to fill
2165 pgoff_t *start, pgoff_t end, struct folio_batch *fbatch)
2190 if (!folio_batch_add(fbatch, folio)) {
2204 nr = folio_batch_count(fbatch);
2207 folio = fbatch->folios[nr - 1];
2212 return folio_batch_count(fbatch);
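
filemap_get_folios_contig() (lines 2154-2212) stops at the first hole rather than skipping it, and @start is advanced past the last folio returned (lines 2204-2207 handle the tail), so the caller can tell how far the dense run extends. A hedged sketch exploiting that property; range_is_dense() is hypothetical and only gives a point-in-time answer:

static bool range_is_dense(struct address_space *mapping,
                           pgoff_t start, pgoff_t end)
{
        struct folio_batch fbatch;
        pgoff_t index = start;

        folio_batch_init(&fbatch);
        while (filemap_get_folios_contig(mapping, &index, end, &fbatch)) {
                bool done = index > end;

                folio_batch_release(&fbatch);
                if (done)
                        return true;
        }
        return false;   /* the lookup stopped at a hole in the range */
}
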
2222 * @fbatch: The batch to fill
2236 pgoff_t end, xa_mark_t tag, struct folio_batch *fbatch)
2250 if (!folio_batch_add(fbatch, folio)) {
2269 return folio_batch_count(fbatch);
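
filemap_get_folios_tag() (lines 2222-2269) is the tag-filtered variant that writeback is built on; the __filemap_fdatawait_range() loop above is one consumer. For dirty folios the same drain shape applies, with the body starting writeout instead of waiting. A skeleton (write_dirty_range() is hypothetical; real writeback, write_cache_pages(), also handles cyclic ranges, tagging and revalidation):

static void write_dirty_range(struct address_space *mapping,
                              pgoff_t index, pgoff_t end)
{
        struct folio_batch fbatch;
        unsigned int i, nr;

        folio_batch_init(&fbatch);
        while ((nr = filemap_get_folios_tag(mapping, &index, end,
                                PAGECACHE_TAG_DIRTY, &fbatch))) {
                for (i = 0; i < nr; i++) {
                        struct folio *folio = fbatch.folios[i];

                        folio_lock(folio);
                        /* revalidate, then hand the folio to writeout */
                        folio_unlock(folio);
                }
                folio_batch_release(&fbatch);
        }
}
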
2303 pgoff_t index, pgoff_t max, struct folio_batch *fbatch)
2322 if (!folio_batch_add(fbatch, folio))
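
Lines 2303-2322 are filemap_get_read_batch(), the read path's private lookup. Unlike the exported helpers it deliberately stops early on a non-uptodate or readahead-marked folio, because the caller has to issue I/O at that point before anything later can be copied. The core of its loop, lightly condensed from the function (sibling entries and the reload/retry dance are omitted):

        /* XA_STATE(xas, &mapping->i_pages, index) declared above */
        rcu_read_lock();
        for (folio = xas_load(&xas); folio; folio = xas_next(&xas)) {
                if (xas_retry(&xas, folio))
                        continue;
                if (xas.xa_index > max || xa_is_value(folio))
                        break;
                if (!folio_try_get(folio))
                        continue;       /* real code resets the walk */
                if (!folio_batch_add(fbatch, folio))
                        break;          /* batch full; folio was added */
                if (!folio_test_uptodate(folio))
                        break;          /* caller must read this folio */
                if (folio_test_readahead(folio))
                        break;          /* caller should kick readahead */
                xas_advance(&xas, folio_next_index(folio) - 1);
        }
        rcu_read_unlock();
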
2453 struct folio_batch *fbatch)
2488 folio_batch_add(fbatch, folio);
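
Lines 2453-2488 are from filemap_create_folio(), the cold-cache fallback: when neither the lookup nor synchronous readahead produced anything, it allocates a folio, inserts it at the read position, reads it, and adds it to the batch so the caller's copy loop need not care where the folio came from. Condensed (invalidate-lock and AOP_TRUNCATED_PAGE handling omitted):

        folio = filemap_alloc_folio(mapping_gfp_mask(mapping), 0);
        if (!folio)
                return -ENOMEM;
        error = filemap_add_folio(mapping, folio, index,
                        mapping_gfp_constraint(mapping, GFP_KERNEL));
        if (!error)
                error = filemap_read_folio(file, mapping->a_ops->read_folio,
                                           folio);
        if (error) {
                folio_put(folio);
                return error;
        }
        folio_batch_add(fbatch, folio);
        return 0;
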
2509 struct folio_batch *fbatch, bool need_uptodate)
2525 filemap_get_read_batch(mapping, index, last_index - 1, fbatch);
2526 if (!folio_batch_count(fbatch)) {
2531 filemap_get_read_batch(mapping, index, last_index - 1, fbatch);
2533 if (!folio_batch_count(fbatch)) {
2537 iocb->ki_pos >> PAGE_SHIFT, fbatch);
2543 folio = fbatch->folios[folio_batch_count(fbatch) - 1];
2551 folio_batch_count(fbatch) > 1)
2563 if (likely(--fbatch->nr))
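
filemap_get_pages() (lines 2509-2563) ties the two helpers together: look up a batch, do synchronous readahead and retry if it came back empty (lines 2525-2531), fall back to filemap_create_folio() if it is still empty (lines 2533-2537), then validate only the last folio in the batch, since that is the only one the lookup may have stopped on. The subtle part is the error path at line 2563: a failure on that trailing folio trims just it with --fbatch->nr, and if any folios remain the function reports success, so the read completes partially rather than failing outright. Condensed:

        folio = fbatch->folios[folio_batch_count(fbatch) - 1];
        if (!folio_test_uptodate(folio)) {
                err = filemap_update_page(iocb, mapping, count, folio,
                                          need_uptodate);
                if (err) {
                        if (err < 0)
                                folio_put(folio);
                        if (likely(--fbatch->nr))
                                return 0;   /* serve the uptodate prefix */
                        return err; /* real code retries AOP_TRUNCATED_PAGE */
                }
        }
        return 0;
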
2597 struct folio_batch fbatch;
2609 folio_batch_init(&fbatch);
2625 error = filemap_get_pages(iocb, iter->count, &fbatch, false);
2653 fbatch.folios[0]))
2654 folio_mark_accessed(fbatch.folios[0]);
2656 for (i = 0; i < folio_batch_count(&fbatch); i++) {
2657 struct folio *folio = fbatch.folios[i];
2688 for (i = 0; i < folio_batch_count(&fbatch); i++)
2689 folio_put(fbatch.folios[i]);
2690 folio_batch_init(&fbatch);
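
filemap_read() (lines 2597-2690) is the main consumer: fill a batch with filemap_get_pages(), copy each folio out to the iterator, drop the references, repeat until the iterator is exhausted or error/EOF stops it. Note lines 2688-2690: the references are dropped with an explicit folio_put() loop followed by folio_batch_init(), rather than folio_batch_release(). The shape, condensed:

        folio_batch_init(&fbatch);
        do {
                error = filemap_get_pages(iocb, iter->count, &fbatch, false);
                if (error < 0)
                        break;
                for (i = 0; i < folio_batch_count(&fbatch); i++) {
                        /*
                         * copy_folio_to_iter() from fbatch.folios[i],
                         * advance iocb->ki_pos, stop on a short copy.
                         */
                }
                for (i = 0; i < folio_batch_count(&fbatch); i++)
                        folio_put(fbatch.folios[i]);
                folio_batch_init(&fbatch);  /* refs already dropped */
        } while (iov_iter_count(iter) && !error);
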
2867 struct folio_batch fbatch;
2885 folio_batch_init(&fbatch);
2894 error = filemap_get_pages(&iocb, len, &fbatch, true);
2917 for (i = 0; i < folio_batch_count(&fbatch); i++) {
2918 struct folio *folio = fbatch.folios[i];
2945 folio_batch_release(&fbatch);
2949 folio_batch_release(&fbatch);
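
filemap_splice_read() (lines 2867-2949) drives the same producer with need_uptodate set to true, since folios are handed to a pipe rather than copied through an iterator. One contrast with filemap_read() above: it releases its batch with folio_batch_release(), both per iteration (line 2945) and again on the exit path (line 2949), so nothing leaks on early exits. Condensed:

        folio_batch_init(&fbatch);
        do {
                error = filemap_get_pages(&iocb, len, &fbatch, true);
                if (error < 0)
                        break;
                for (i = 0; i < folio_batch_count(&fbatch); i++) {
                        /*
                         * splice_folio_into_pipe() for fbatch.folios[i];
                         * stop once the pipe is full or len runs out.
                         */
                }
                folio_batch_release(&fbatch);
        } while (len);

        folio_batch_release(&fbatch);   /* exit path: drop any leftovers */
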