/linux-master/include/linux/
pagevec.h
     6: * folios. A folio_batch is a container which is used for that.
    20: * struct folio_batch - A collection of folios.
    23: * operating on a set of folios. The order of folios in the batch may be
    32: struct folio *folios[PAGEVEC_SIZE];    (member in struct folio_batch)
    36: * folio_batch_init() - Initialise a batch of folios
    39: * A freshly initialised folio_batch contains zero folios.
    77: fbatch->folios[fbatch->nr++] = folio;
    85: * Use this function to implement a queue of folios.
    93: return fbatch->folios[fbatc [all...]
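Taken together, these pagevec.h hits cover the whole folio_batch lifecycle: init, add-until-full, process, release. Below is a minimal usage sketch in kernel C, assuming the helpers above plus folio_batch_count() and folio_batch_release() from the same header; process_folio() is a hypothetical callback, not a kernel API.

#include <linux/pagevec.h>

/* Hypothetical per-folio handler; stands in for real work. */
static void process_folio(struct folio *folio);

static void drain(struct folio_batch *fbatch)
{
        unsigned int i;

        for (i = 0; i < folio_batch_count(fbatch); i++)
                process_folio(fbatch->folios[i]);
        folio_batch_release(fbatch);    /* put refs, reset to empty */
}

static void process_folios(struct folio **src, unsigned int n)
{
        struct folio_batch fbatch;
        unsigned int i;

        folio_batch_init(&fbatch);      /* a fresh batch holds zero folios */
        for (i = 0; i < n; i++) {
                /* folio_batch_add() returns the slots left; 0 means full */
                if (folio_batch_add(&fbatch, src[i]) == 0)
                        drain(&fbatch);
        }
        drain(&fbatch);                 /* flush the partial tail */
}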
mm_inline.h
    22: * right LRU list and to account folios correctly.
   265: list_add_tail(&folio->lru, &lrugen->folios[gen][type][zone]);
   267: list_add(&folio->lru, &lrugen->folios[gen][type][zone]);
memcontrol.h
   383: * against some type of folios, e.g. slab folios or ex-slab folios or
   384: * kmem folios.
   404: * against some type of folios, e.g. slab folios or ex-slab folios or
   405: * LRU folios.
   425: * against some type of folios, e.g. slab folios o
   717: mem_cgroup_uncharge_folios(struct folio_batch *folios)    (argument)
  1296: mem_cgroup_uncharge_folios(struct folio_batch *folios)    (argument) [all...]
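The two prototypes at 717 and 1296 look like the CONFIG_MEMCG implementation and its no-op stub. A hedged sketch of what the batched call buys over per-folio uncharging; uncharge_batch() and uncharge_each() are illustrative names only, and the folios are assumed already unreachable (off the LRU and page cache) with the caller still holding their references.

#include <linux/memcontrol.h>
#include <linux/pagevec.h>

/* One call for the whole batch; a no-op when !CONFIG_MEMCG. */
static void uncharge_batch(struct folio_batch *fbatch)
{
        mem_cgroup_uncharge_folios(fbatch);
}

/* Per-folio equivalent, for comparison. */
static void uncharge_each(struct folio_batch *fbatch)
{
        unsigned int i;

        for (i = 0; i < folio_batch_count(fbatch); i++)
                mem_cgroup_uncharge(fbatch->folios[i]);
}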
/linux-master/fs/btrfs/
accessors.c
    31: token->kaddr = folio_address(eb->folios[0]);
    54: * The extent buffer pages stored in the array folios may not form a contiguous
    80: token->kaddr = folio_address(token->eb->folios[idx]); \
    86: token->kaddr = folio_address(token->eb->folios[idx + 1]); \
    99: char *kaddr = folio_address(eb->folios[idx]); \
   109: kaddr = folio_address(eb->folios[idx + 1]); \
   135: token->kaddr = folio_address(token->eb->folios[idx]); \
   144: token->kaddr = folio_address(token->eb->folios[idx + 1]); \
   156: char *kaddr = folio_address(eb->folios[idx]); \
   170: kaddr = folio_address(eb->folios[id [all...]
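The accessors.c macros cache a mapped address per folio and step to folios[idx + 1] when a value crosses a page boundary. Here is a hedged standalone sketch of that split-read pattern, assuming order-0 folios and a page-aligned buffer start; folios_read_u64() is hypothetical, not the btrfs macro.

#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/string.h>

/*
 * With single-page folios, a u64 at byte offset @off may straddle a
 * page boundary, so copy it in up to two pieces from folios[idx] and
 * folios[idx + 1].
 */
static u64 folios_read_u64(struct folio **folios, unsigned long off)
{
        unsigned long idx = off >> PAGE_SHIFT;
        size_t in_page = offset_in_page(off);
        size_t part = min_t(size_t, PAGE_SIZE - in_page, sizeof(u64));
        __le64 raw = 0;

        memcpy(&raw, folio_address(folios[idx]) + in_page, part);
        if (part < sizeof(u64))         /* tail lives in the next folio */
                memcpy((u8 *)&raw + part, folio_address(folios[idx + 1]),
                       sizeof(u64) - part);
        return le64_to_cpu(raw);
}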
extent_io.c
   223: struct folio *folio = fbatch.folios[i];
   274: struct folio *folio = fbatch.folios[i];
   476: /* Only order 0 (single page) folios are allowed for data. */
   609: /* For now only order 0 folios are supported for data. */
   704: * Populate needed folios for the extent buffer.
   706: * For now, the folios populated are always in order 0 (aka, single page).
   719: eb->folios[i] = page_folio(page_array[i]);
  1707: struct folio *folio = eb->folios[0];
  1726: struct folio *folio = eb->folios[i];
  1947: struct folio *folio = fbatch.folios[ [all...]
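Line 719 shows the population step: pages allocated in bulk are recorded as order-0 folios. A hedged sketch of that step, assuming alloc_pages_bulk_array() as the allocator (btrfs wraps its own helper around bulk allocation); populate_folios() is hypothetical.

#include <linux/gfp.h>
#include <linux/mm.h>
#include <linux/slab.h>

static int populate_folios(struct folio **folios, unsigned long nr)
{
        struct page **pages;
        unsigned long i, got;

        pages = kcalloc(nr, sizeof(*pages), GFP_NOFS);
        if (!pages)
                return -ENOMEM;

        got = alloc_pages_bulk_array(GFP_NOFS, nr, pages);
        if (got < nr) {                 /* partial allocation: undo and bail */
                for (i = 0; i < got; i++)
                        __free_page(pages[i]);
                kfree(pages);
                return -ENOMEM;
        }

        for (i = 0; i < nr; i++)
                folios[i] = page_folio(pages[i]);   /* order-0 folios */

        kfree(pages);
        return 0;
}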
extent_io.h
   116: * Pointers to all the folios of the extent buffer.
   120: struct folio *folios[INLINE_EXTENT_BUFFER_PAGES];    (member in struct extent_buffer)
   157: * 1.2) Several page sized folios
   167: return offset_in_folio(eb->folios[0], offset + eb->start);
   178: * 1.2) Several page sized folios
   285: * This can only be determined at runtime by checking eb::folios[0].
   289: * single-paged folios.
   293: if (folio_order(eb->folios[0]))
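Lines 285-293 document the runtime layout check: folio_order() on folios[0] decides between one large folio and several single-page ones. A hedged sketch follows; nr_backing_folios() is a hypothetical stand-in for the header's real helper.

#include <linux/kernel.h>
#include <linux/mm.h>

static unsigned long nr_backing_folios(struct folio **folios,
                                       unsigned long len)
{
        if (folio_order(folios[0]))
                return 1;                       /* one higher-order folio */
        return DIV_ROUND_UP(len, PAGE_SIZE);    /* several order-0 folios */
}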
defrag.c
  1165: struct folio **folios, int nr_pages,
  1174: unsigned long first_index = folios[0]->index;
  1191: folio_clear_checked(folios[i]);
  1192: btrfs_folio_clamp_set_dirty(fs_info, folios[i], start, len);
  1208: struct folio **folios;    (local)
  1219: folios = kcalloc(nr_pages, sizeof(struct folio *), GFP_NOFS);
  1220: if (!folios)
  1225: folios[i] = defrag_prepare_one_folio(inode, start_index + i);
  1226: if (IS_ERR(folios[i])) {
  1227: ret = PTR_ERR(folios[
  1163: defrag_one_locked_target(struct btrfs_inode *inode, struct defrag_target_range *target, struct folio **folios, int nr_pages, struct extent_state **cached_state)    (argument) [all...]
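Lines 1219-1227 show an allocate/prepare/unwind shape. A hedged reconstruction of just that pattern, with prepare_one() standing in for defrag_prepare_one_folio() and a simplified unwind that assumes each prepared folio came back locked with a reference held.

#include <linux/err.h>
#include <linux/mm.h>
#include <linux/pagemap.h>
#include <linux/slab.h>

/* Hypothetical stand-in for defrag_prepare_one_folio(). */
static struct folio *prepare_one(struct inode *inode, pgoff_t index);

static struct folio **prepare_folios(struct inode *inode,
                                     pgoff_t start_index, int nr_pages)
{
        struct folio **folios;
        int i, ret;

        folios = kcalloc(nr_pages, sizeof(struct folio *), GFP_NOFS);
        if (!folios)
                return ERR_PTR(-ENOMEM);

        for (i = 0; i < nr_pages; i++) {
                folios[i] = prepare_one(inode, start_index + i);
                if (IS_ERR(folios[i])) {
                        ret = PTR_ERR(folios[i]);
                        goto err_unwind;
                }
        }
        return folios;

err_unwind:
        while (--i >= 0) {              /* undo folios [0, i) */
                folio_unlock(folios[i]);
                folio_put(folios[i]);
        }
        kfree(folios);
        return ERR_PTR(ret);
}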
tree-checker.c
    68: dump_page(folio_page(eb->folios[0], 0), "eb page dump");
    96: dump_page(folio_page(eb->folios[0], 0), "eb page dump");
   157: dump_page(folio_page(eb->folios[0], 0), "eb page dump");
   653: dump_page(folio_page(eb->folios[0], 0), "eb page dump");
  1010: dump_page(folio_page(eb->folios[0], 0), "eb page dump");
  1266: dump_page(folio_page(eb->folios[0], 0), "eb page dump");
/linux-master/mm/
swap.c
   226: struct folio *folio = fbatch->folios[i];
   418: struct folio *batch_folio = fbatch->folios[i];
   643: * Lazyfree folios are clean anonymous folios. They have
   645: * anonymous folios
   964: * folios_put_refs - Reduce the reference count on a batch of folios.
   965: * @folios: The folios.
   968: * Like folio_put(), but for a batch of folios. This is more efficient
   970: * to be taken if the folios ar
   978: folios_put_refs(struct folio_batch *folios, unsigned int *refs)    (argument) [all...]
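folios_put_refs() drops refs[i] references from folios[i] (NULL refs means one each) and, as the swap_state.c caller below implies, leaves the batch reinitialised for reuse. The same accumulate-and-flush shape as a hedged standalone sketch; nr_refs[] holds the per-folio counts to drop.

#include <linux/mm.h>
#include <linux/pagevec.h>

static void release_folios(struct folio **src, unsigned int *nr_refs,
                           unsigned int n)
{
        unsigned int refs[PAGEVEC_SIZE];
        struct folio_batch fbatch;
        unsigned int i;

        folio_batch_init(&fbatch);
        for (i = 0; i < n; i++) {
                /* record the refcount for the slot the folio will occupy */
                refs[folio_batch_count(&fbatch)] = nr_refs[i];
                if (folio_batch_add(&fbatch, src[i]) == 0)
                        folios_put_refs(&fbatch, refs); /* batch full: flush */
        }
        if (folio_batch_count(&fbatch))
                folios_put_refs(&fbatch, refs);         /* flush the tail */
}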
truncate.c
    70: if (xa_is_value(fbatch->folios[j]))
    83: struct folio *folio = fbatch->folios[i];
    87: fbatch->folios[j++] = folio;
   200: * Handle partial folios. The folio may be entirely within the
   274: * It only drops clean, unused folios.
   358: truncate_cleanup_folio(fbatch.folios[i]);
   361: folio_unlock(fbatch.folios[i]);
   405: struct folio *folio = fbatch.folios[i];
   479: * mapping_try_invalidate - Invalidate all the evictable folios of one inode
   480: * @mapping: the address_space which holds the folios t [all...]
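Lines 70-87 compact a lookup batch in place, squeezing out xarray value (shadow) entries so only real folios remain. A hedged sketch of that idiom:

#include <linux/pagevec.h>
#include <linux/xarray.h>

static void drop_value_entries(struct folio_batch *fbatch)
{
        unsigned int i, j = 0;

        for (i = 0; i < folio_batch_count(fbatch); i++) {
                struct folio *folio = fbatch->folios[i];

                if (xa_is_value(folio))
                        continue;       /* shadow entry, not a folio */
                fbatch->folios[j++] = folio;
        }
        fbatch->nr = j;                 /* shrink the batch to the keepers */
}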
swap_state.c
   138: * This must be called only on folios that have
   230: * This must be called only on folios that have
   314: struct folio_batch folios;    (local)
   318: folio_batch_init(&folios);
   323: refs[folios.nr] = 1;
   326: refs[folios.nr] = encoded_nr_pages(pages[++i]);
   328: if (folio_batch_add(&folios, folio) == 0)
   329: folios_put_refs(&folios, refs);
   331: if (folios.nr)
   332: folios_put_refs(&folios, ref [all...]
migrate.c
   688: * folios that do not use PagePrivate/PagePrivate2.
   811: * buffer_migrate_folio() - Migration function for folios with buffers.
   833: * buffer_migrate_folio_norefs() - Migration function for folios with buffers.
   922: /* Only writeback folios in full synchronous migration */
   972: * Most folios have a mapping and most filesystems
   973: * provide a migrate_folio callback. Anonymous folios
  1506: int nr_succeeded;	/* Normal and large folios migrated successfully, in
  1508: int nr_failed_pages;	/* Normal and large folios failed to be migrated, in
  1509: 			   units of base pages. Untried folios aren't counted */
  1517: * Returns the number of hugetlb folios tha [all...]
filemap.c
   186: /* hugetlb folios do not participate in page cache accounting. */
   253: * This must be called only on folios that are locked and have been
   274: * page_cache_delete_batch - delete several folios from page cache
   275: * @mapping: the mapping to which folios belong
   276: * @fbatch: batch of folios to delete
   278: * The function walks over mapping->i_pages and removes folios passed in
   289: XA_STATE(xas, &mapping->i_pages, fbatch->folios[0]->index);
   309: if (folio != fbatch->folios[i]) {
   311: fbatch->folios[i]->index, folio);
   338: struct folio *folio = fbatch->folios[ [all...]
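A hedged sketch of the page_cache_delete_batch() walk outlined by lines 289-338: start an XA_STATE at the first victim's index and clear each slot whose folio matches the next batch entry. Locking (the i_pages lock) and all accounting are omitted; the batch is assumed sorted by index, as the real caller guarantees.

#include <linux/pagevec.h>
#include <linux/xarray.h>

static void delete_batch(struct xarray *pages, struct folio_batch *fbatch)
{
        XA_STATE(xas, pages, fbatch->folios[0]->index);
        struct folio *folio;
        unsigned int i = 0;

        xas_for_each(&xas, folio, ULONG_MAX) {
                if (i >= folio_batch_count(fbatch))
                        break;
                if (xa_is_value(folio))
                        continue;       /* skip shadow entries */
                if (folio != fbatch->folios[i])
                        continue;       /* not a victim; leave it alone */
                xas_store(&xas, NULL);  /* drop it from the tree */
                i++;
        }
}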
mlock.c
    51: * Mlocked folios are marked with the PG_mlocked flag for efficient testing
   194: folio = fbatch->folios[i];
   197: fbatch->folios[i] = folio;
page_alloc.c
  2519: * Free a batch of folios
  2521: void free_unref_folios(struct folio_batch *folios)    (argument)
  2528: /* Prepare folios for freeing */
  2529: for (i = 0, j = 0; i < folios->nr; i++) {
  2530: struct folio *folio = folios->folios[i];
  2540: * Free isolated folios and orders not handled on the PCP
  2552: folios->folios[j] = folio;
  2555: folios [all...]
/linux-master/tools/mm/
thpmaps
   252: folios = indexes[index_next:index_end][heads[index_next:index_end]]
   256: nr = (int(folios[0]) if len(folios) else index_end) - index_next
   261: if len(folios):
   264: nr = index_end - int(folios[-1])
   265: folios = folios[:-1]
   270: if len(folios):
   271: folio_nrs = np.append(np.diff(folios), np.uint64(index_end - folios[ [all...]
/linux-master/fs/ramfs/
file-nommu.c
   235: ret = (unsigned long) folio_address(fbatch.folios[0]);
   236: pfn = folio_pfn(fbatch.folios[0]);
   240: if (pfn + nr_pages != folio_pfn(fbatch.folios[loop])) {
   244: nr_pages += folio_nr_pages(fbatch.folios[loop]);
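For nommu mmap, the folios backing a region must be physically contiguous; lines 236-244 verify this by comparing page frame numbers. The same check as a hedged standalone predicate:

#include <linux/mm.h>
#include <linux/pagevec.h>

/*
 * Walk a batch of folios and verify their page frames form one
 * physically consecutive run, summing page counts as we go.
 */
static bool batch_is_contiguous(struct folio_batch *fbatch)
{
        unsigned long pfn = folio_pfn(fbatch->folios[0]);
        unsigned long nr_pages = 0;
        unsigned int loop;

        for (loop = 0; loop < folio_batch_count(fbatch); loop++) {
                if (pfn + nr_pages != folio_pfn(fbatch->folios[loop]))
                        return false;   /* hole between folios */
                nr_pages += folio_nr_pages(fbatch->folios[loop]);
        }
        return true;
}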
/linux-master/fs/bcachefs/
fs-io-pagecache.h
     7: typedef DARRAY(struct folio *) folios;    (typedef)
    10: u64, fgf_t, gfp_t, folios *);
    88: /* for newly allocated folios: */
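Unlike the fixed-size folio_batch, bcachefs collects folios in a growable darray. A hedged sketch of the push/iterate/exit lifecycle, relying on the fs-io-buffered.c hits below for the semantics (nonzero darray_push() means allocation failure); collect_folio() is hypothetical.

#include "darray.h"             /* bcachefs's resizable-array helpers */
#include <linux/mm_types.h>

typedef DARRAY(struct folio *) folios;

static int collect_folio(folios *fs, struct folio *folio)
{
        if (darray_push(fs, folio))
                return -ENOMEM; /* could not grow the array */
        return 0;
}

/* Later: iterate fs->data[0 .. fs->nr) and free with darray_exit(fs). */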
fs-io-buffered.c
    49: folios folios;    (member in struct readpages_iter)
    61: darray_push(&iter->folios, folio)) {
    65: return iter->folios.nr ? 0 : -ENOMEM;
    76: if (iter->idx >= iter->folios.nr)
    78: return iter->folios.data[iter->idx];
   281: readpages_iter.folios.nr -
   303: darray_exit(&readpages_iter.folios);
   800: static noinline void folios_trunc(folios *fs, struct folio **fi)
   818: folios f [all...]
fs-io-pagecache.c
    17: folios *fs)
   125: /* for newly allocated folios: */
   286: struct folio *folio = fbatch.folios[i];
   331: struct folio *folio = fbatch.folios[i];
   687: struct folio *folio = fbatch.folios[i];
/linux-master/fs/nilfs2/
page.c
   193: * This function is for both data folios and btnode folios. The dirty flag
   258: struct folio *folio = fbatch.folios[i], *dfolio;
   312: struct folio *folio = fbatch.folios[i], *dfolio;
   373: struct folio *folio = fbatch.folios[i];
   518: folio = fbatch.folios[i];
/linux-master/fs/smb/client/
cifsencrypt.c
   106: struct folio *folios[16], *folio;    (local)
   119: nr = xa_extract(iter->xarray, (void **)folios, index, last,
   120: ARRAY_SIZE(folios), XA_PRESENT);
   125: folio = folios[i];
   144: } while (nr == ARRAY_SIZE(folios));
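cifsencrypt.c drains an xarray in fixed-size chunks: xa_extract() returns at most ARRAY_SIZE(folios) present entries, so the loop repeats until a short batch comes back. A hedged sketch of that shape; handle_folio() is hypothetical.

#include <linux/pagemap.h>
#include <linux/xarray.h>

/* Hypothetical per-folio callback. */
static void handle_folio(struct folio *folio);

static void for_each_folio_in_range(struct xarray *xa,
                                    unsigned long index, unsigned long last)
{
        struct folio *folios[16];
        unsigned int nr, i;

        do {
                nr = xa_extract(xa, (void **)folios, index, last,
                                ARRAY_SIZE(folios), XA_PRESENT);
                for (i = 0; i < nr; i++) {
                        struct folio *folio = folios[i];

                        /* resume after this folio on the next pass */
                        index = folio_next_index(folio);
                        handle_folio(folio);
                }
        } while (nr == ARRAY_SIZE(folios));
}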
/linux-master/fs/gfs2/
aops.c
   198: * gfs2_write_jdata_batch - Write back a folio batch's worth of folios
   201: * @fbatch: The batch of folios
   221: size += folio_size(fbatch->folios[i]);
   229: struct folio *folio = fbatch->folios[i];
   687: * mm accommodates an old ext3 case where clean folios might
   689: * dirty folios to ->release_folio() via shrink_active_list().
   691: * As a workaround, we skip folios that contain dirty buffers
   692: * below. Once ->release_folio isn't called on dirty folios
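With large folios, a batch no longer covers a fixed number of bytes, hence the folio_size() summation at line 221 before journal space is reserved. The same computation as a hedged standalone helper:

#include <linux/mm.h>
#include <linux/pagevec.h>

static size_t batch_bytes(struct folio_batch *fbatch)
{
        size_t size = 0;
        unsigned int i;

        for (i = 0; i < folio_batch_count(fbatch); i++)
                size += folio_size(fbatch->folios[i]);
        return size;
}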
/linux-master/fs/btrfs/tests/
extent-io-tests.c
    38: struct folio *folio = fbatch.folios[i];
   675: struct page *page = folio_page(eb->folios[i >> PAGE_SHIFT], 0);
   691: void *eb_addr = folio_address(eb->folios[i]);
/linux-master/fs/ceph/
addr.c
  1034: page = &fbatch.folios[i]->page;
  1175: fbatch.folios[i] = NULL;
  1186: if (!fbatch.folios[j])
  1189: fbatch.folios[n] = fbatch.folios[j];
  1349: doutc(cl, "folio_batch release on %d folios (%p)\n",
  1350: (int)fbatch.nr, fbatch.nr ? fbatch.folios[0] : NULL);
  1373: page = &fbatch.folios[i]->page;