Lines matching refs: shadow (mm/workingset.c)

177  * slot of the evicted page.  This is called a shadow entry.
179 * On cache misses for which there are shadow entries, an eligible
210 static void unpack_shadow(void *shadow, int *memcgidp, pg_data_t **pgdat,
213 unsigned long entry = xa_to_value(shadow);
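
The matches above describe the shadow entry itself: when a page is evicted, a packed value is left behind in its page-cache slot, and unpack_shadow() recovers the memcg id, node, eviction counter and workingset bit from that xarray value. Below is a minimal userspace sketch of the pack/unpack round trip; the field widths and the pack_shadow() helper are illustrative assumptions for the example, not the kernel's actual layout.

	/*
	 * Simplified model of a shadow entry: eviction counter, memcg id,
	 * node id and a workingset bit packed into one 64-bit unsigned long.
	 * Field widths are assumed for illustration only.
	 */
	#include <assert.h>
	#include <stdbool.h>
	#include <stdio.h>

	#define WORKINGSET_BITS	1
	#define NODE_BITS	10	/* assumed width for the node id */
	#define MEMCG_BITS	16	/* assumed width for the memcg id */

	static unsigned long pack_shadow(int memcgid, int nid,
					 unsigned long eviction, bool workingset)
	{
		unsigned long entry = eviction;

		entry = (entry << MEMCG_BITS) | memcgid;
		entry = (entry << NODE_BITS) | nid;
		entry = (entry << WORKINGSET_BITS) | workingset;
		return entry;
	}

	static void unpack_shadow(unsigned long entry, int *memcgid, int *nid,
				  unsigned long *eviction, bool *workingset)
	{
		*workingset = entry & ((1UL << WORKINGSET_BITS) - 1);
		entry >>= WORKINGSET_BITS;
		*nid = entry & ((1UL << NODE_BITS) - 1);
		entry >>= NODE_BITS;
		*memcgid = entry & ((1UL << MEMCG_BITS) - 1);
		entry >>= MEMCG_BITS;
		*eviction = entry;
	}

	int main(void)
	{
		int memcgid, nid;
		unsigned long eviction;
		bool ws;

		unpack_shadow(pack_shadow(42, 1, 123456, true),
			      &memcgid, &nid, &eviction, &ws);
		assert(memcgid == 42 && nid == 1 && eviction == 123456 && ws);
		printf("shadow round-trip ok\n");
		return 0;
	}
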
260 * Tests if the shadow entry is for a folio that was recently evicted.
261 * Fills in @lruvec, @token, @workingset with the values unpacked from shadow.
263 static bool lru_gen_test_recent(void *shadow, bool file, struct lruvec **lruvec,
271 unpack_shadow(shadow, &memcg_id, &pgdat, token, workingset);
280 static void lru_gen_refault(struct folio *folio, void *shadow)
293 recent = lru_gen_test_recent(shadow, type, &lruvec, &token, &workingset);
334 static bool lru_gen_test_recent(void *shadow, bool file, struct lruvec **lruvec,
340 static void lru_gen_refault(struct folio *folio, void *shadow)
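
lru_gen_test_recent() and lru_gen_refault() each show up twice in the matches because the file provides a real version when the multi-gen LRU is built in and an empty stub otherwise, so the refault path can call them unconditionally. A small userspace sketch of that compile-time stub pattern; the FANCY_LRU switch and helper name are made up for the example:

	#include <stdbool.h>
	#include <stdio.h>

	#ifdef FANCY_LRU
	/* "real" version, compiled only when the feature is enabled */
	static bool fancy_test_recent(long token)
	{
		return token >= 0;
	}
	#else
	/* stub: callers need no #ifdef of their own */
	static bool fancy_test_recent(long token)
	{
		(void)token;
		return false;
	}
	#endif

	int main(void)
	{
		printf("recent: %d\n", fancy_test_recent(7));
		return 0;
	}
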
379 * Return: a shadow entry to be stored in @folio->mapping->i_pages in place
408 * workingset_test_recent - tests if the shadow entry is for a folio that was
410 * shadow.
411 * @shadow: the shadow entry to be tested.
413 * @workingset: where the workingset value unpacked from shadow should
416 * Return: true if the shadow is for a recently evicted folio; false otherwise.
418 bool workingset_test_recent(void *shadow, bool file, bool *workingset)
432 bool recent = lru_gen_test_recent(shadow, file,
440 unpack_shadow(shadow, &memcgid, &pgdat, &eviction, workingset);
483 * special case: usually, shadow entries have a short lifetime
486 * nonresident_age to lap a shadow entry in the field, which
524 * @shadow: Shadow entry of the evicted folio.
530 void workingset_refault(struct folio *folio, void *shadow)
540 lru_gen_refault(folio, shadow);
561 if (!workingset_test_recent(shadow, file, &workingset))
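
The workingset_test_recent() and workingset_refault() matches cover the refault path: unpack the shadow entry, work out how much eviction activity has happened since this page left (the refault distance), and treat the folio as recently evicted if that distance still fits within what the node could have kept active. A compact sketch of that comparison, under the simplifying assumption of a single global eviction counter; the names and thresholds are illustrative, not the kernel's exact code:

	#include <stdbool.h>
	#include <stdio.h>

	/* Assumed, simplified state: one global counter and one capacity
	 * figure; the kernel tracks these per node and per memcg. */
	static unsigned long nonresident_age;	/* bumped on every eviction */
	static unsigned long workingset_size;	/* pages eligible for activation */

	/* eviction is the counter snapshot stored in the shadow entry */
	static bool test_recent(unsigned long eviction)
	{
		/* unsigned subtraction stays correct across counter wraparound */
		unsigned long refault_distance = nonresident_age - eviction;

		return refault_distance <= workingset_size;
	}

	int main(void)
	{
		workingset_size = 1000;
		nonresident_age = 5000;

		printf("evicted at 4500: recent=%d\n", test_recent(4500)); /* 1 */
		printf("evicted at 1000: recent=%d\n", test_recent(1000)); /* 0 */
		return 0;
	}
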
611 * create excessive amounts of shadow nodes. To keep a lid on this,
612 * track shadow nodes and reclaim them when they grow way past the
623 * Track non-empty nodes that contain only shadow entries;
659 * containing shadow entries. We don't need to keep more
660 * shadow entries than possible pages on the active list,
667 * Nodes might be sparsely populated, with only one shadow
669 * node for every eligible shadow entry, so compromise on a
674 * each, this will reclaim shadow entries when they consume
715 * the shadow node LRU under the i_pages lock and the
750 * The nodes should only contain one or more shadow entries,
813 "mm-shadow");