Searched refs:reclaimed (Results 1 - 14 of 14) sorted by relevance

/linux-master/mm/
vmpressure.c
26 * we try to analyze scanned/reclaimed ratio. So the window is used as a
42 * scanned/reclaimed ratio. The current values were chosen empirically. In
121 unsigned long reclaimed)
123 unsigned long scale = scanned + reclaimed;
127 * reclaimed can be greater than scanned for things such as reclaimed
128 * slab pages. shrink_node() just adds reclaimed pages without a
131 if (reclaimed >= scanned)
135 * scanned vs. reclaimed in a given time frame (window). Note that
140 pressure = scale - (reclaimed * scale / scanned);
120 vmpressure_calc_level(unsigned long scanned, unsigned long reclaimed) argument
184 unsigned long reclaimed; local
239 vmpressure(gfp_t gfp, struct mem_cgroup *memcg, bool tree, unsigned long scanned, unsigned long reclaimed) argument
[all...]
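
The vmpressure.c hits above contain the whole calculation: pressure is the percentage of scanned pages that were not reclaimed, derived from pressure = scale - (reclaimed * scale / scanned) and then normalized by scale. Below is a minimal userspace sketch of that arithmetic; the 60/95 thresholds and the level names are illustrative assumptions, not values taken from these results.

/*
 * Minimal userspace sketch of the scanned/reclaimed ratio shown above.
 * The formula mirrors the vmpressure.c excerpt; the 60/95 thresholds
 * and level names are illustrative assumptions.
 */
#include <stdio.h>

static const char *pressure_level(unsigned long scanned, unsigned long reclaimed)
{
        unsigned long scale = scanned + reclaimed;
        unsigned long pressure;

        /* As in the excerpt: reclaimed can exceed scanned (e.g. slab pages). */
        if (reclaimed >= scanned)
                return "low";

        pressure = scale - (reclaimed * scale / scanned);
        pressure = pressure * 100 / scale;      /* percent of scanning that was "wasted" */

        if (pressure >= 95)
                return "critical";
        if (pressure >= 60)
                return "medium";
        return "low";
}

int main(void)
{
        printf("scanned=1000 reclaimed=900 -> %s\n", pressure_level(1000, 900));
        printf("scanned=1000 reclaimed=100 -> %s\n", pressure_level(1000, 100));
        printf("scanned=1000 reclaimed=10  -> %s\n", pressure_level(1000, 10));
        return 0;
}
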
vmscan.c
105 /* Can mapped folios be reclaimed? */
122 * don't threaten to OOM. If any cgroup is reclaimed at
124 * setting (memcg_low_skipped), and nothing is reclaimed as a
173 /* for recording the reclaimed slab by now */
266 * flush_reclaim_state(): add pages reclaimed outside of LRU-based reclaim to
272 * Currently, reclaim_state->reclaimed includes three types of pages
281 * If we count the entire page as reclaimed from the memcg, we end up
282 * overestimating the reclaimed amount (potentially under-reclaiming).
289 * charged to the target memcg, we end up underestimating the reclaimed
300 sc->nr_reclaimed += current->reclaim_state->reclaimed;
684 __remove_mapping(struct address_space *mapping, struct folio *folio, bool reclaimed, struct mem_cgroup *target_memcg) argument
4511 int reclaimed; local
4755 unsigned long reclaimed = sc->nr_reclaimed; local
4909 unsigned long reclaimed = sc->nr_reclaimed; local
5837 unsigned long reclaimed; local
[all...]
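
The flush_reclaim_state() hits describe folding pages reclaimed outside LRU-based reclaim (for example by shrinkers) into sc->nr_reclaimed. Here is a hedged, stand-alone sketch of that accumulate-then-flush pattern; all type and function names are made up for the example.

#include <stdio.h>

struct reclaim_state_demo {
        unsigned long reclaimed;        /* pages freed outside LRU reclaim */
};

struct scan_control_demo {
        unsigned long nr_reclaimed;     /* total the reclaim loop reports */
};

/* e.g. a slab shrinker crediting pages it just freed */
static void account_extra_reclaim(struct reclaim_state_demo *rs, unsigned long pages)
{
        rs->reclaimed += pages;
}

/* fold the side counter into the scan total and reset it */
static void flush_reclaim_state_demo(struct scan_control_demo *sc,
                                     struct reclaim_state_demo *rs)
{
        if (rs->reclaimed) {
                sc->nr_reclaimed += rs->reclaimed;
                rs->reclaimed = 0;
        }
}

int main(void)
{
        struct reclaim_state_demo rs = { 0 };
        struct scan_control_demo sc = { 0 };

        sc.nr_reclaimed += 32;          /* pages freed by LRU scanning */
        account_extra_reclaim(&rs, 8);  /* pages freed by a shrinker */
        flush_reclaim_state_demo(&sc, &rs);

        printf("nr_reclaimed = %lu\n", sc.nr_reclaimed);        /* 40 */
        return 0;
}
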
memcontrol.c
2698 * been aggressively reclaimed enough yet.
3730 unsigned long reclaimed; local
3764 reclaimed = mem_cgroup_soft_reclaim(mz->memcg, pgdat,
3766 nr_reclaimed += reclaimed;
3774 if (!reclaimed)
6777 unsigned long reclaimed; local
6791 reclaimed = try_to_free_mem_cgroup_pages(memcg, nr_pages - high,
6794 if (!reclaimed && !nr_retries--)
6989 unsigned long reclaimed; local
7002 reclaimed
[all...]
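
The memcontrol.c hits show a bounded retry loop: keep calling the reclaimer until the target is met, but only consume a retry when an attempt frees nothing. A self-contained sketch of that control flow follows; the reclaim callback is a stand-in and the retry budget of 5 is arbitrary, not the kernel's value.

#include <stdio.h>

static unsigned long fake_reclaim(unsigned long want)
{
        static int calls;
        /* pretend the first attempt frees half of what was asked, then nothing */
        return (calls++ == 0) ? want / 2 : 0;
}

static unsigned long reclaim_down_to(unsigned long usage, unsigned long high)
{
        int nr_retries = 5;     /* arbitrary retry budget for the example */

        while (usage > high) {
                unsigned long reclaimed = fake_reclaim(usage - high);

                usage -= reclaimed;
                if (!reclaimed && !nr_retries--)
                        break;  /* repeated failure: give up rather than loop forever */
        }
        return usage;
}

int main(void)
{
        printf("final usage: %lu\n", reclaim_down_to(1000, 600));
        return 0;
}
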
/linux-master/include/linux/
vmpressure.h
15 unsigned long reclaimed; member in struct:vmpressure
19 /* The lock is used to keep the scanned/reclaimed above in sync. */
34 unsigned long scanned, unsigned long reclaimed);
48 unsigned long scanned, unsigned long reclaimed) {}
47 vmpressure(gfp_t gfp, struct mem_cgroup *memcg, bool tree, unsigned long scanned, unsigned long reclaimed) argument
swap.h
156 /* pages reclaimed outside of LRU-based reclaim */
157 unsigned long reclaimed; member in struct:reclaim_state
165 * mm_account_reclaimed_pages(): account reclaimed pages outside of LRU-based
167 * @pages: number of pages reclaimed
170 * number of reclaimed pages by @pages.
175 current->reclaim_state->reclaimed += pages;
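
The swap.h hits document mm_account_reclaimed_pages() as an opt-in hook: the counter only moves when the caller has installed a reclaim_state. A userspace sketch of that shape, where current_state stands in for current->reclaim_state and is not a real kernel symbol:

#include <stddef.h>
#include <stdio.h>

struct reclaim_state_demo {
        unsigned long reclaimed;
};

/* stand-in for the per-task current->reclaim_state pointer */
static struct reclaim_state_demo *current_state;

static void account_reclaimed_pages(unsigned long pages)
{
        if (current_state)              /* only count when a caller opted in */
                current_state->reclaimed += pages;
}

int main(void)
{
        struct reclaim_state_demo rs = { 0 };

        account_reclaimed_pages(4);     /* nobody is accounting yet: ignored */

        current_state = &rs;            /* enter a reclaim context */
        account_reclaimed_pages(16);    /* a shrinker frees 16 pages */
        current_state = NULL;           /* leave the reclaim context */

        printf("reclaimed = %lu\n", rs.reclaimed);      /* 16 */
        return 0;
}
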
/linux-master/drivers/md/dm-vdo/
statistics.h
112 /* number of gets that are reclaimed */
113 u64 reclaimed; member in struct:block_map_statistics
message-stats.c
205 /* number of gets that are reclaimed */
206 write_u64("reclaimed : ", stats->reclaimed, ", ", buf, maxlen);
block-map.c
1046 bool was_discard, reclaimed; local
1068 reclaimed = (!was_discard || (info->busy > 0) || vdo_waitq_has_waiters(&info->waiting));
1073 ADD_ONCE(cache->stats.reclaimed, reclamations);
1078 if (reclaimed)
3306 totals.reclaimed += READ_ONCE(stats->reclaimed);
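
The dm-vdo hits bump a per-cache reclaimed counter with ADD_ONCE and later sum the per-zone counters with READ_ONCE into a total. Below is a sketch of that counter-aggregation pattern using C11 relaxed atomics as a stand-in for those macros; the zone count and all names are invented for the example.

#include <stdatomic.h>
#include <stdio.h>

#define NR_ZONES 4

struct zone_stats {
        atomic_ulong reclaimed;         /* cache pages this zone reclaimed */
};

static struct zone_stats zones[NR_ZONES];

static void note_reclaimed(unsigned int zone, unsigned long count)
{
        /* relaxed ordering: it is a statistic, readers tolerate stale values */
        atomic_fetch_add_explicit(&zones[zone].reclaimed, count,
                                  memory_order_relaxed);
}

static unsigned long total_reclaimed(void)
{
        unsigned long total = 0;

        for (unsigned int i = 0; i < NR_ZONES; i++)
                total += atomic_load_explicit(&zones[i].reclaimed,
                                              memory_order_relaxed);
        return total;
}

int main(void)
{
        note_reclaimed(0, 3);
        note_reclaimed(2, 5);
        printf("total reclaimed: %lu\n", total_reclaimed());    /* 8 */
        return 0;
}
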
/linux-master/tools/testing/selftests/cgroup/
test_memcontrol.c
691 bool reclaimed = false; local
699 else if (reclaimed)
706 reclaimed = true;
710 return reclaimed;
/linux-master/arch/x86/kernel/cpu/sgx/
encl.c
28 * a PCMD page is in process of being reclaimed.
32 * When an enclave page is reclaimed some Paging Crypto MetaData (PCMD) is
33 * stored. The PCMD data of a reclaimed enclave page contains enough
37 * The backing storage to which enclave pages are reclaimed is laid out as
47 * reclaimed.
61 int reclaimed = 0; local
96 reclaimed = 1;
101 return reclaimed;
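
The encl.c comments describe PCMD pages that back metadata for several reclaimed enclave pages, and the snippet's helper returns 1 while reclaim involving such a page is still in flight. A loosely analogous sketch under that reading follows; the entry count, the flag, and the loop structure are assumptions for illustration, not the kernel's code.

#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

#define ENTRIES_PER_PCMD_PAGE 32        /* illustrative, not quoted from the file */

struct encl_page_demo {
        bool being_reclaimed;           /* reclaim still writing metadata for this page */
};

/* return 1 if any page sharing the metadata page is still being reclaimed */
static int pcmd_page_in_use(const struct encl_page_demo *pages, size_t n)
{
        int reclaimed = 0;

        for (size_t i = 0; i < n; i++) {
                if (pages[i].being_reclaimed) {
                        reclaimed = 1;
                        break;
                }
        }
        return reclaimed;
}

int main(void)
{
        struct encl_page_demo pages[ENTRIES_PER_PCMD_PAGE] = { 0 };

        pages[7].being_reclaimed = true;
        printf("metadata page busy: %d\n",
               pcmd_page_in_use(pages, ENTRIES_PER_PCMD_PAGE));
        return 0;
}
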
/linux-master/drivers/net/ethernet/marvell/
mv643xx_eth.c
1061 int reclaimed; local
1065 reclaimed = 0;
1066 while (reclaimed < budget && txq->tx_desc_count > 0) {
1088 reclaimed++;
1121 if (reclaimed < budget)
1124 return reclaimed;
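
The mv643xx_eth.c hits show the classic budgeted TX completion loop: reclaim finished descriptors, never exceed the poll budget, and return the count so the caller can decide whether to keep polling. A toy sketch of that loop, with an invented descriptor ring rather than the driver's:

#include <stdio.h>

struct txq_demo {
        int tx_desc_count;      /* descriptors still outstanding */
        int done;               /* how many of those the hardware has completed */
};

static int reclaim_tx(struct txq_demo *txq, int budget)
{
        int reclaimed = 0;

        while (reclaimed < budget && txq->tx_desc_count > 0) {
                if (txq->done == 0)
                        break;                  /* nothing completed yet */
                txq->done--;
                txq->tx_desc_count--;           /* hand the slot back to software */
                reclaimed++;
        }
        /* reclaimed < budget lets a NAPI-style caller stop polling */
        return reclaimed;
}

int main(void)
{
        struct txq_demo txq = { .tx_desc_count = 64, .done = 40 };
        int budget = 16;

        printf("reclaimed %d of budget %d\n", reclaim_tx(&txq, budget), budget);
        return 0;
}
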
/linux-master/drivers/block/drbd/
drbd_receiver.c
212 LIST_HEAD(reclaimed);
216 reclaim_finished_net_peer_reqs(device, &reclaimed);
218 list_for_each_entry_safe(peer_req, t, &reclaimed, w.list)
446 LIST_HEAD(reclaimed);
451 reclaim_finished_net_peer_reqs(device, &reclaimed);
455 list_for_each_entry_safe(peer_req, t, &reclaimed, w.list)
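
The drbd_receiver.c hits collect finished peer requests onto a private reclaimed list and then walk it with a deletion-safe iterator. Here is a sketch of the same collect-then-free pattern using a plain singly linked list in place of the kernel's list_head machinery; the "even ids are finished" rule is only a stand-in for the real completion check.

#include <stdio.h>
#include <stdlib.h>

struct peer_req_demo {
        int id;
        struct peer_req_demo *next;
};

/* move "finished" entries (even ids here) from *src onto *reclaimed */
static void collect_finished(struct peer_req_demo **src, struct peer_req_demo **reclaimed)
{
        struct peer_req_demo **pp = src;

        while (*pp) {
                struct peer_req_demo *req = *pp;

                if (req->id % 2 == 0) {         /* pretend even ids are done */
                        *pp = req->next;        /* unlink from the source list */
                        req->next = *reclaimed;
                        *reclaimed = req;       /* push onto the private list */
                } else {
                        pp = &req->next;
                }
        }
}

int main(void)
{
        struct peer_req_demo *list = NULL, *reclaimed = NULL;

        for (int i = 1; i <= 4; i++) {          /* build the list 4, 3, 2, 1 */
                struct peer_req_demo *req = malloc(sizeof(*req));
                if (!req)
                        return 1;
                req->id = i;
                req->next = list;
                list = req;
        }

        collect_finished(&list, &reclaimed);

        /* deletion-safe walk: grab next before freeing the current entry */
        for (struct peer_req_demo *req = reclaimed, *t; req; req = t) {
                t = req->next;
                printf("freeing finished request %d\n", req->id);
                free(req);
        }

        /* release whatever was not reclaimed */
        for (struct peer_req_demo *req = list, *t; req; req = t) {
                t = req->next;
                free(req);
        }
        return 0;
}
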
/linux-master/drivers/net/ethernet/hisilicon/hns3/
hns3_enet.c
1146 /* This tx spare buffer is only really reclaimed after calling
3587 bool reclaimed = false; local
3614 reclaimed = true;
3617 if (unlikely(!reclaimed))
/linux-master/drivers/net/ethernet/chelsio/cxgb4/
sge.c
108 * Max number of Tx descriptors to be reclaimed by the Tx timer.
1407 unsigned int reclaimed, hw_cidx; local
1415 reclaimed = reclaim_completed_tx(adap, &eq->q, maxreclaim, true);
1433 return reclaimed;
2021 * Descriptors are reclaimed after their DMAs complete.
4318 /* We reclaimed all reclaimable TX Descriptors, so reschedule

Completed in 319 milliseconds