Lines Matching refs:fbatch

50 /* Protecting only lru_rotate.fbatch which requires disabling interrupts */
53 struct folio_batch fbatch;
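
These matches appear to be mm/swap.c. Lines 50-53 belong to the per-CPU structure that pairs a folio_batch with a local lock; as the comment says, this batch, unlike the others, can be filled from interrupt context, so its lock must be taken IRQ-safe. A sketch of the declaration, assuming the mm/swap.c layout:

	struct lru_rotate {
		local_lock_t lock;
		struct folio_batch fbatch;
	};
	static DEFINE_PER_CPU(struct lru_rotate, lru_rotate) = {
		.lock = INIT_LOCAL_LOCK(lock),
	};
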
141 struct folio_batch fbatch;
144 folio_batch_init(&fbatch);
153 if (folio_batch_add(&fbatch, folio) > 0)
155 free_unref_folios(&fbatch);
158 if (fbatch.nr)
159 free_unref_folios(&fbatch);
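
Lines 141-159 show the fill-then-flush idiom: folio_batch_add() returns the number of free slots left, so a return of 0 means the batch just filled and must be passed to free_unref_folios() (which empties and reinitializes it) before more folios are queued; the trailing fbatch.nr check flushes a partial batch. A condensed sketch, assuming a put_pages_list()-style caller (free_unref_folios() is mm-internal):

	static void free_folio_list_sketch(struct list_head *pages)
	{
		struct folio_batch fbatch;
		struct folio *folio, *next;

		folio_batch_init(&fbatch);
		list_for_each_entry_safe(folio, next, pages, lru) {
			if (!folio_put_testzero(folio))
				continue;		/* other refs remain */
			if (folio_batch_add(&fbatch, folio) > 0)
				continue;		/* room left, keep batching */
			free_unref_folios(&fbatch);	/* full: flush and reinit */
		}
		if (fbatch.nr)				/* flush the partial tail */
			free_unref_folios(&fbatch);
	}
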
206 static void folio_batch_move_lru(struct folio_batch *fbatch, move_fn_t move_fn)
212 for (i = 0; i < folio_batch_count(fbatch); i++) {
213 struct folio *folio = fbatch->folios[i];
227 folios_put(fbatch);
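
Lines 206-227 are the drain side: walk the batch, re-taking the lruvec lock only when a folio's lruvec differs from the previous one, apply move_fn, then drop the references the batch held via folios_put(). A condensed sketch (the kernel version also tests and sets the folio LRU flag, omitted here):

	static void batch_move_lru_sketch(struct folio_batch *fbatch, move_fn_t move_fn)
	{
		struct lruvec *lruvec = NULL;
		unsigned long flags = 0;
		int i;

		for (i = 0; i < folio_batch_count(fbatch); i++) {
			struct folio *folio = fbatch->folios[i];

			/* Re-locks only when this folio's lruvec changes. */
			lruvec = folio_lruvec_relock_irqsave(folio, lruvec, &flags);
			move_fn(lruvec, folio);
		}
		if (lruvec)
			unlock_page_lruvec_irqrestore(lruvec, flags);
		folios_put(fbatch);	/* drop the refs taken when batching */
	}
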
230 static void folio_batch_add_and_move(struct folio_batch *fbatch,
233 if (folio_batch_add(fbatch, folio) && !folio_test_large(folio) &&
236 folio_batch_move_lru(fbatch, move_fn);
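
Line 233 encodes the flush policy: keep deferring while folio_batch_add() reports free slots, but flush immediately for large folios and whenever the LRU cache is disabled. Filled out, the helper on lines 230-236 reads roughly:

	static void add_and_move_sketch(struct folio_batch *fbatch,
			struct folio *folio, move_fn_t move_fn)
	{
		/* Defer the LRU move while batching is allowed and room remains. */
		if (folio_batch_add(fbatch, folio) && !folio_test_large(folio) &&
		    !lru_cache_disabled())
			return;
		folio_batch_move_lru(fbatch, move_fn);	/* flush now */
	}
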
260 struct folio_batch *fbatch;
265 fbatch = this_cpu_ptr(&lru_rotate.fbatch);
266 folio_batch_add_and_move(fbatch, folio, lru_move_tail_fn);
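
Lines 260-266 are the producer for the rotate batch. Because writeback completion can reach it from interrupt context, the caller pins the folio and takes the local lock IRQ-safe before queueing. A sketch of the surrounding caller, with a hypothetical name:

	void rotate_reclaimable_sketch(struct folio *folio)
	{
		struct folio_batch *fbatch;
		unsigned long flags;

		folio_get(folio);	/* the batch owns a reference */
		local_lock_irqsave(&lru_rotate.lock, flags);
		fbatch = this_cpu_ptr(&lru_rotate.fbatch);
		folio_batch_add_and_move(fbatch, folio, lru_move_tail_fn);
		local_unlock_irqrestore(&lru_rotate.lock, flags);
	}
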
348 struct folio_batch *fbatch = &per_cpu(cpu_fbatches.activate, cpu);
350 if (folio_batch_count(fbatch))
351 folio_batch_move_lru(fbatch, folio_activate_fn);
358 struct folio_batch *fbatch;
362 fbatch = this_cpu_ptr(&cpu_fbatches.activate);
363 folio_batch_add_and_move(fbatch, folio, folio_activate_fn);
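
Lines 348-363 contrast the two per-CPU accessors: the drain path names a specific CPU's batch with per_cpu(..., cpu), while the hot path takes the local one with this_cpu_ptr(). Both index into the cpu_fbatches container, which (field set varying slightly by kernel version) looks roughly like:

	struct cpu_fbatches {
		local_lock_t lock;
		struct folio_batch lru_add;
		struct folio_batch lru_deactivate_file;
		struct folio_batch lru_deactivate;
		struct folio_batch lru_lazyfree;
	#ifdef CONFIG_SMP
		struct folio_batch activate;
	#endif
	};
	static DEFINE_PER_CPU(struct cpu_fbatches, cpu_fbatches) = {
		.lock = INIT_LOCAL_LOCK(lock),
	};
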
388 struct folio_batch *fbatch;
392 fbatch = this_cpu_ptr(&cpu_fbatches.lru_add);
404 for (i = folio_batch_count(fbatch) - 1; i >= 0; i--) {
405 struct folio *batch_folio = fbatch->folios[i];
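
Lines 388-405 search the local lru_add batch backwards for a folio that has not reached the LRU lists yet: scanning newest-first is the optimistic case, since the folio being activated was most likely just queued. A sketch of the whole helper:

	static void activate_from_batch_sketch(struct folio *folio)
	{
		struct folio_batch *fbatch;
		int i;

		local_lock(&cpu_fbatches.lock);
		fbatch = this_cpu_ptr(&cpu_fbatches.lru_add);

		/* Newest entries first: the target was probably just added. */
		for (i = folio_batch_count(fbatch) - 1; i >= 0; i--) {
			struct folio *batch_folio = fbatch->folios[i];

			if (batch_folio == folio) {
				folio_set_active(folio);
				break;
			}
		}
		local_unlock(&cpu_fbatches.lock);
	}
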
508 struct folio_batch *fbatch;
521 fbatch = this_cpu_ptr(&cpu_fbatches.lru_add);
522 folio_batch_add_and_move(fbatch, folio, lru_add_fn);
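
Lines 508-522 are folio_add_lru(), the producer that publishes a new folio through the lru_add batch. A deliberately minimal, hypothetical caller for orientation only; real callers normally insert the folio into the page cache or swap cache first:

	static struct folio *alloc_lru_folio_sketch(gfp_t gfp)
	{
		struct folio *folio = folio_alloc(gfp, 0);

		if (folio)
			folio_add_lru(folio);	/* queued on this CPU's lru_add batch */
		return folio;
	}
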
654 struct folio_batch *fbatch = &fbatches->lru_add;
656 if (folio_batch_count(fbatch))
657 folio_batch_move_lru(fbatch, lru_add_fn);
659 fbatch = &per_cpu(lru_rotate.fbatch, cpu);
661 if (data_race(folio_batch_count(fbatch))) {
666 folio_batch_move_lru(fbatch, lru_move_tail_fn);
670 fbatch = &fbatches->lru_deactivate_file;
671 if (folio_batch_count(fbatch))
672 folio_batch_move_lru(fbatch, lru_deactivate_file_fn);
674 fbatch = &fbatches->lru_deactivate;
675 if (folio_batch_count(fbatch))
676 folio_batch_move_lru(fbatch, lru_deactivate_fn);
678 fbatch = &fbatches->lru_lazyfree;
679 if (folio_batch_count(fbatch))
680 folio_batch_move_lru(fbatch, lru_lazyfree_fn);
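
Lines 654-680 are the per-CPU drain, which must run on the CPU owning the batches (or while that CPU is offline). Note that only the rotate batch needs the data_race()/IRQ-safe handling, since it alone is filled from interrupt context. The usual local entry point is roughly the following (the real one also drains the per-CPU mlock batch):

	void lru_add_drain_sketch(void)
	{
		local_lock(&cpu_fbatches.lock);
		lru_add_drain_cpu(smp_processor_id());	/* drain our own batches */
		local_unlock(&cpu_fbatches.lock);
	}
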
697 struct folio_batch *fbatch;
705 fbatch = this_cpu_ptr(&cpu_fbatches.lru_deactivate_file);
706 folio_batch_add_and_move(fbatch, folio, lru_deactivate_file_fn);
722 struct folio_batch *fbatch;
726 fbatch = this_cpu_ptr(&cpu_fbatches.lru_deactivate);
727 folio_batch_add_and_move(fbatch, folio, lru_deactivate_fn);
744 struct folio_batch *fbatch;
748 fbatch = this_cpu_ptr(&cpu_fbatches.lru_lazyfree);
749 folio_batch_add_and_move(fbatch, folio, lru_lazyfree_fn);
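
Lines 697-749 repeat one producer pattern across the three deactivation batches; only the gate conditions differ. For the lazyfree batch, for instance, the folio must be an LRU, anonymous, swap-backed folio that is neither in the swap cache nor unevictable. A sketch of that variant, under those assumptions:

	void mark_lazyfree_sketch(struct folio *folio)
	{
		if (folio_test_lru(folio) && folio_test_anon(folio) &&
		    folio_test_swapbacked(folio) && !folio_test_swapcache(folio) &&
		    !folio_test_unevictable(folio)) {
			struct folio_batch *fbatch;

			folio_get(folio);
			local_lock(&cpu_fbatches.lock);
			fbatch = this_cpu_ptr(&cpu_fbatches.lru_lazyfree);
			folio_batch_add_and_move(fbatch, folio, lru_lazyfree_fn);
			local_unlock(&cpu_fbatches.lock);
		}
	}
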
801 data_race(folio_batch_count(&per_cpu(lru_rotate.fbatch, cpu))) ||
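
Line 801 belongs to the check lru_add_drain_all() uses to skip idle CPUs. Reading a remote CPU's rotate batch count is racy by design (a stale answer costs at most a useless drain, or one the next pass catches), hence the data_race() annotation. An abridged sketch:

	static bool cpu_needs_drain_sketch(unsigned int cpu)
	{
		struct cpu_fbatches *fbatches = &per_cpu(cpu_fbatches, cpu);

		return folio_batch_count(&fbatches->lru_add) ||
		       /* Racy read of a batch filled from IRQ context. */
		       data_race(folio_batch_count(&per_cpu(lru_rotate.fbatch, cpu))) ||
		       folio_batch_count(&fbatches->lru_deactivate_file) ||
		       folio_batch_count(&fbatches->lru_deactivate) ||
		       folio_batch_count(&fbatches->lru_lazyfree);
	}
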
1042 struct folio_batch fbatch;
1047 folio_batch_init(&fbatch);
1053 refs[fbatch.nr] = 1;
1056 refs[fbatch.nr] = encoded_nr_pages(encoded[++i]);
1058 if (folio_batch_add(&fbatch, folio) > 0)
1060 folios_put_refs(&fbatch, refs);
1063 if (fbatch.nr)
1064 folios_put_refs(&fbatch, refs);
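
Lines 1042-1064 extend the fill-then-flush idiom with a parallel refs[] array: each slot normally drops one reference, unless the next encoded entry carries an nr_pages count (note the ++i consuming it); folios_put_refs() then drops that many per folio. A sketch of the loop, assuming the encoded_page helpers from <linux/mm_types.h>:

	void release_encoded_sketch(struct encoded_page **encoded, int nr)
	{
		struct folio_batch fbatch;
		int refs[PAGEVEC_SIZE];
		int i;

		folio_batch_init(&fbatch);
		for (i = 0; i < nr; i++) {
			struct folio *folio = page_folio(encoded_page_ptr(encoded[i]));

			refs[fbatch.nr] = 1;
			if (unlikely(encoded_page_flags(encoded[i]) &
				     ENCODED_PAGE_BIT_NR_PAGES_NEXT))
				refs[fbatch.nr] = encoded_nr_pages(encoded[++i]);

			if (folio_batch_add(&fbatch, folio) > 0)
				continue;
			folios_put_refs(&fbatch, refs);	/* batch full: flush */
		}
		if (fbatch.nr)
			folios_put_refs(&fbatch, refs);
	}
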
1078 void __folio_batch_release(struct folio_batch *fbatch)
1080 if (!fbatch->percpu_pvec_drained) {
1082 fbatch->percpu_pvec_drained = true;
1084 folios_put(fbatch);
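
Lines 1078-1084 show why releasing a batch may drain the per-CPU caches first: a folio still sitting in an lru_add batch holds an extra reference, so without a drain its final put could linger until some other drain runs; percpu_pvec_drained limits the drain to once per batch. A typical consumer, assuming filemap_get_folios():

	static void drop_range_sketch(struct address_space *mapping,
				      pgoff_t start, pgoff_t end)
	{
		struct folio_batch fbatch;

		folio_batch_init(&fbatch);
		while (filemap_get_folios(mapping, &start, end, &fbatch)) {
			/* ... operate on fbatch.folios[0 .. fbatch.nr) ... */
			folio_batch_release(&fbatch);	/* -> __folio_batch_release() */
		}
	}
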
1090 * @fbatch: The batch to prune
1093 * entries. This function prunes all the non-folio entries from @fbatch
1097 void folio_batch_remove_exceptionals(struct folio_batch *fbatch)
1101 for (i = 0, j = 0; i < folio_batch_count(fbatch); i++) {
1102 struct folio *folio = fbatch->folios[i];
1104 fbatch->folios[j++] = folio;
1106 fbatch->nr = j;
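
Lines 1097-1106 compact the batch in place with two indices, keeping only real folios; the pruned entries are xarray value entries (shadow/swap entries) that lookup helpers such as find_get_entries() can return alongside folios. The test elided from the listing is xa_is_value():

	void remove_exceptionals_sketch(struct folio_batch *fbatch)
	{
		unsigned int i, j;

		for (i = 0, j = 0; i < folio_batch_count(fbatch); i++) {
			struct folio *folio = fbatch->folios[i];

			if (!xa_is_value(folio))	/* keep real folios */
				fbatch->folios[j++] = folio;
		}
		fbatch->nr = j;	/* shrink to the kept entries */
	}
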