Lines matching refs:pgdat (each entry is the source line number followed by the matching line)

466 static bool skip_throttle_noprogress(pg_data_t *pgdat)
475 if (pgdat->kswapd_failures >= MAX_RECLAIM_RETRIES)
484 struct zone *zone = pgdat->node_zones + i;
499 void reclaim_throttle(pg_data_t *pgdat, enum vmscan_throttle_state reason)
501 wait_queue_head_t *wqh = &pgdat->reclaim_wait[reason];
530 if (atomic_inc_return(&pgdat->nr_writeback_throttled) == 1) {
531 WRITE_ONCE(pgdat->nr_reclaim_start,
532 node_page_state(pgdat, NR_THROTTLED_WRITTEN));
539 if (skip_throttle_noprogress(pgdat)) {
561 atomic_dec(&pgdat->nr_writeback_throttled);
563 trace_mm_vmscan_throttled(pgdat->node_id, jiffies_to_usecs(timeout),
573 void __acct_reclaim_writeback(pg_data_t *pgdat, struct folio *folio,
587 nr_written = node_page_state(pgdat, NR_THROTTLED_WRITTEN) -
588 READ_ONCE(pgdat->nr_reclaim_start);
591 wake_up(&pgdat->reclaim_wait[VMSCAN_THROTTLE_WRITEBACK]);
955 struct pglist_data *pgdat)
957 int target_nid = next_demotion_node(pgdat->node_id);
979 node_get_allowed_targets(pgdat, &allowed_mask);
986 mod_node_page_state(pgdat, PGDEMOTE_KSWAPD + reclaimer_offset(),
1012 struct pglist_data *pgdat, struct scan_control *sc,
1026 do_demote_pass = can_demote(pgdat->node_id, sc);
1131 test_bit(PGDAT_WRITEBACK, &pgdat->flags)) {
1296 !test_bit(PGDAT_DIRTY, &pgdat->flags))) {
1463 nr_reclaimed += demote_folio_list(&demote_folios, pgdat);
1754 static bool too_many_isolated(struct pglist_data *pgdat, int file,
1767 inactive = node_page_state(pgdat, NR_INACTIVE_FILE);
1768 isolated = node_page_state(pgdat, NR_ISOLATED_FILE);
1770 inactive = node_page_state(pgdat, NR_INACTIVE_ANON);
1771 isolated = node_page_state(pgdat, NR_ISOLATED_ANON);
1786 wake_throttle_isolated(pgdat);
1891 struct pglist_data *pgdat = lruvec_pgdat(lruvec);
1894 while (unlikely(too_many_isolated(pgdat, file, sc))) {
1900 reclaim_throttle(pgdat, VMSCAN_THROTTLE_ISOLATED);
1914 __mod_node_page_state(pgdat, NR_ISOLATED_ANON + file, nr_taken);
1926 nr_reclaimed = shrink_folio_list(&folio_list, pgdat, sc, &stat, false);
1931 __mod_node_page_state(pgdat, NR_ISOLATED_ANON + file, -nr_taken);
1964 reclaim_throttle(pgdat, VMSCAN_THROTTLE_WRITEBACK);
1976 trace_mm_vmscan_lru_shrink_inactive(pgdat->node_id,
2012 struct pglist_data *pgdat = lruvec_pgdat(lruvec);
2021 __mod_node_page_state(pgdat, NR_ISOLATED_ANON + file, nr_taken);
2084 __mod_node_page_state(pgdat, NR_ISOLATED_ANON + file, -nr_taken);
2089 trace_mm_vmscan_lru_shrink_active(pgdat->node_id, nr_taken, nr_activate,
2094 struct pglist_data *pgdat,
2108 nr_reclaimed = shrink_folio_list(folio_list, pgdat, &sc, &dummy_stat, ignore_references);
2220 static void prepare_scan_control(pg_data_t *pgdat, struct scan_control *sc)
2228 target_lruvec = mem_cgroup_lruvec(sc->target_mem_cgroup, pgdat);
2300 free = sum_zone_node_page_state(pgdat->node_id, NR_FREE_PAGES);
2301 file = node_page_state(pgdat, NR_ACTIVE_FILE) +
2302 node_page_state(pgdat, NR_INACTIVE_FILE);
2305 struct zone *zone = &pgdat->node_zones[z];
2318 anon = node_page_state(pgdat, NR_INACTIVE_ANON);
2337 struct pglist_data *pgdat = lruvec_pgdat(lruvec);
2348 if (!sc->may_swap || !can_reclaim_anon_pages(memcg, pgdat->node_id, sc)) {
2535 static bool can_age_anon_pages(struct pglist_data *pgdat,
2543 return can_demote(pgdat->node_id, sc);
2591 struct pglist_data *pgdat = NODE_DATA(nid);
2598 if (!lruvec->pgdat)
2599 lruvec->pgdat = pgdat;
2606 return &pgdat->__lruvec;
2612 struct pglist_data *pgdat = lruvec_pgdat(lruvec);
2617 if (!can_demote(pgdat->node_id, sc) &&
2762 struct pglist_data *pgdat = lruvec_pgdat(walk->lruvec);
2766 key = pgdat->node_id % BITS_PER_TYPE(mm->lru_gen.bitmap);
3300 struct pglist_data *pgdat, bool can_swap)
3305 if (pfn < pgdat->node_start_pfn || pfn >= pgdat_end_pfn(pgdat))
3309 if (folio_nid(folio) != pgdat->node_id)
3341 struct pglist_data *pgdat = lruvec_pgdat(walk->lruvec);
3372 folio = get_pfn_folio(pfn, memcg, pgdat, walk->can_swap);
3409 struct pglist_data *pgdat = lruvec_pgdat(walk->lruvec);
3453 folio = get_pfn_folio(pfn, memcg, pgdat, walk->can_swap);
3516 struct pglist_data *pgdat = lruvec_pgdat(walk->lruvec);
3526 if (pfn < pgdat->node_start_pfn || pfn >= pgdat_end_pfn(pgdat))
3650 static struct lru_gen_mm_walk *set_mm_walk(struct pglist_data *pgdat, bool force_alloc)
3654 if (pgdat && current_is_kswapd()) {
3657 walk = &pgdat->mm_walk;
3938 static void lru_gen_age_node(struct pglist_data *pgdat, struct scan_control *sc)
3951 struct lruvec *lruvec = mem_cgroup_lruvec(memcg, pgdat);
4001 struct pglist_data *pgdat = folio_pgdat(folio);
4002 struct lruvec *lruvec = mem_cgroup_lruvec(memcg, pgdat);
4053 folio = get_pfn_folio(pfn, memcg, pgdat, can_swap);
4109 struct pglist_data *pgdat = lruvec_pgdat(lruvec);
4111 spin_lock_irqsave(&pgdat->memcg_lru.lock, flags);
4124 new = get_memcg_gen(pgdat->memcg_lru.seq);
4126 new = get_memcg_gen(pgdat->memcg_lru.seq + 1);
4136 hlist_nulls_add_head_rcu(&lruvec->lrugen.list, &pgdat->memcg_lru.fifo[new][bin]);
4138 hlist_nulls_add_tail_rcu(&lruvec->lrugen.list, &pgdat->memcg_lru.fifo[new][bin]);
4140 pgdat->memcg_lru.nr_memcgs[old]--;
4141 pgdat->memcg_lru.nr_memcgs[new]++;
4143 if (!pgdat->memcg_lru.nr_memcgs[old] && old == get_memcg_gen(pgdat->memcg_lru.seq))
4144 WRITE_ONCE(pgdat->memcg_lru.seq, pgdat->memcg_lru.seq + 1);
4146 spin_unlock_irqrestore(&pgdat->memcg_lru.lock, flags);
4158 struct pglist_data *pgdat = NODE_DATA(nid);
4161 spin_lock_irq(&pgdat->memcg_lru.lock);
4165 gen = get_memcg_gen(pgdat->memcg_lru.seq);
4169 hlist_nulls_add_tail_rcu(&lruvec->lrugen.list, &pgdat->memcg_lru.fifo[gen][bin]);
4170 pgdat->memcg_lru.nr_memcgs[gen]++;
4172 spin_unlock_irq(&pgdat->memcg_lru.lock);
4193 struct pglist_data *pgdat = NODE_DATA(nid);
4196 spin_lock_irq(&pgdat->memcg_lru.lock);
4204 pgdat->memcg_lru.nr_memcgs[gen]--;
4206 if (!pgdat->memcg_lru.nr_memcgs[gen] && gen == get_memcg_gen(pgdat->memcg_lru.seq))
4207 WRITE_ONCE(pgdat->memcg_lru.seq, pgdat->memcg_lru.seq + 1);
4209 spin_unlock_irq(&pgdat->memcg_lru.lock);
4521 struct pglist_data *pgdat = lruvec_pgdat(lruvec);
4537 reclaimed = shrink_folio_list(&list, pgdat, sc, &stat, false);
4539 trace_mm_vmscan_lru_shrink_inactive(pgdat->node_id,
4757 struct pglist_data *pgdat = lruvec_pgdat(lruvec);
4774 shrink_slab(sc->gfp_mask, pgdat->node_id, memcg, sc->priority);
4793 static void shrink_many(struct pglist_data *pgdat, struct scan_control *sc)
4804 gen = get_memcg_gen(READ_ONCE(pgdat->memcg_lru.seq));
4812 hlist_nulls_for_each_entry_rcu(lrugen, pos, &pgdat->memcg_lru.fifo[gen][bin], list) {
4884 static void set_initial_priority(struct pglist_data *pgdat, struct scan_control *sc)
4896 reclaimable = node_page_state(pgdat, NR_INACTIVE_FILE);
4897 if (can_reclaim_anon_pages(NULL, pgdat->node_id, sc))
4898 reclaimable += node_page_state(pgdat, NR_INACTIVE_ANON);
4906 static void lru_gen_shrink_node(struct pglist_data *pgdat, struct scan_control *sc)
4925 set_mm_walk(pgdat, sc->proactive);
4927 set_initial_priority(pgdat, sc);
4933 shrink_one(&pgdat->__lruvec, sc);
4935 shrink_many(pgdat, sc);
4945 pgdat->kswapd_failures = 0;
5533 void lru_gen_init_pgdat(struct pglist_data *pgdat)
5537 spin_lock_init(&pgdat->memcg_lru.lock);
5541 INIT_HLIST_NULLS_HEAD(&pgdat->memcg_lru.fifo[i][j], i);
5624 static void lru_gen_age_node(struct pglist_data *pgdat, struct scan_control *sc)
5634 static void lru_gen_shrink_node(struct pglist_data *pgdat, struct scan_control *sc)
5777 static inline bool should_continue_reclaim(struct pglist_data *pgdat,
5804 struct zone *zone = &pgdat->node_zones[z];
5822 inactive_lru_pages = node_page_state(pgdat, NR_INACTIVE_FILE);
5823 if (can_reclaim_anon_pages(NULL, pgdat->node_id, sc))
5824 inactive_lru_pages += node_page_state(pgdat, NR_INACTIVE_ANON);
5829 static void shrink_node_memcgs(pg_data_t *pgdat, struct scan_control *sc)
5836 struct lruvec *lruvec = mem_cgroup_lruvec(memcg, pgdat);
5875 shrink_slab(sc->gfp_mask, pgdat->node_id, memcg,
5887 static void shrink_node(pg_data_t *pgdat, struct scan_control *sc)
5894 lru_gen_shrink_node(pgdat, sc);
5898 target_lruvec = mem_cgroup_lruvec(sc->target_mem_cgroup, pgdat);
5906 prepare_scan_control(pgdat, sc);
5908 shrink_node_memcgs(pgdat, sc);
5941 set_bit(PGDAT_WRITEBACK, &pgdat->flags);
5945 set_bit(PGDAT_DIRTY, &pgdat->flags);
5955 reclaim_throttle(pgdat, VMSCAN_THROTTLE_WRITEBACK);
5983 reclaim_throttle(pgdat, VMSCAN_THROTTLE_CONGESTED);
5985 if (should_continue_reclaim(pgdat, nr_node_reclaimed, sc))
5995 pgdat->kswapd_failures = 0;
6035 static void consider_reclaim_throttle(pg_data_t *pgdat, struct scan_control *sc)
6044 wqh = &pgdat->reclaim_wait[VMSCAN_THROTTLE_NOPROGRESS];
6062 reclaim_throttle(pgdat, VMSCAN_THROTTLE_NOPROGRESS);
6165 static void snapshot_refaults(struct mem_cgroup *target_memcg, pg_data_t *pgdat)
6173 target_lruvec = mem_cgroup_lruvec(target_memcg, pgdat);
6285 static bool allow_direct_reclaim(pg_data_t *pgdat)
6293 if (pgdat->kswapd_failures >= MAX_RECLAIM_RETRIES)
6297 zone = &pgdat->node_zones[i];
6315 if (!wmark_ok && waitqueue_active(&pgdat->kswapd_wait)) {
6316 if (READ_ONCE(pgdat->kswapd_highest_zoneidx) > ZONE_NORMAL)
6317 WRITE_ONCE(pgdat->kswapd_highest_zoneidx, ZONE_NORMAL);
6319 wake_up_interruptible(&pgdat->kswapd_wait);
6339 pg_data_t *pgdat = NULL;
6378 pgdat = zone->zone_pgdat;
6379 if (allow_direct_reclaim(pgdat))
6385 if (!pgdat)
6400 wait_event_interruptible_timeout(pgdat->pfmemalloc_wait,
6401 allow_direct_reclaim(pgdat), HZ);
6405 allow_direct_reclaim(pgdat));
6462 pg_data_t *pgdat,
6465 struct lruvec *lruvec = mem_cgroup_lruvec(memcg, pgdat);
6539 static void kswapd_age_node(struct pglist_data *pgdat, struct scan_control *sc)
6545 lru_gen_age_node(pgdat, sc);
6549 if (!can_age_anon_pages(pgdat, sc))
6552 lruvec = mem_cgroup_lruvec(NULL, pgdat);
6558 lruvec = mem_cgroup_lruvec(memcg, pgdat);
6565 static bool pgdat_watermark_boosted(pg_data_t *pgdat, int highest_zoneidx)
6578 zone = pgdat->node_zones + i;
6593 static bool pgdat_balanced(pg_data_t *pgdat, int order, int highest_zoneidx)
6604 zone = pgdat->node_zones + i;
6628 /* Clear pgdat state for congested, dirty or under writeback. */
6629 static void clear_pgdat_congested(pg_data_t *pgdat)
6631 struct lruvec *lruvec = mem_cgroup_lruvec(NULL, pgdat);
6635 clear_bit(PGDAT_DIRTY, &pgdat->flags);
6636 clear_bit(PGDAT_WRITEBACK, &pgdat->flags);
6645 static bool prepare_kswapd_sleep(pg_data_t *pgdat, int order,
6661 if (waitqueue_active(&pgdat->pfmemalloc_wait))
6662 wake_up_all(&pgdat->pfmemalloc_wait);
6665 if (pgdat->kswapd_failures >= MAX_RECLAIM_RETRIES)
6668 if (pgdat_balanced(pgdat, order, highest_zoneidx)) {
6669 clear_pgdat_congested(pgdat);
6684 static bool kswapd_shrink_node(pg_data_t *pgdat,
6693 zone = pgdat->node_zones + z;
6704 shrink_node(pgdat, sc);
6721 update_reclaim_active(pg_data_t *pgdat, int highest_zoneidx, bool active)
6727 zone = pgdat->node_zones + i;
6740 set_reclaim_active(pg_data_t *pgdat, int highest_zoneidx)
6742 update_reclaim_active(pgdat, highest_zoneidx, true);
6746 clear_reclaim_active(pg_data_t *pgdat, int highest_zoneidx)
6748 update_reclaim_active(pgdat, highest_zoneidx, false);
6764 static int balance_pgdat(pg_data_t *pgdat, int order, int highest_zoneidx)
6793 zone = pgdat->node_zones + i;
6803 set_reclaim_active(pgdat, highest_zoneidx);
6826 zone = pgdat->node_zones + i;
6836 * If the pgdat is imbalanced then ignore boosting and preserve
6842 balanced = pgdat_balanced(pgdat, sc.order, highest_zoneidx);
6874 kswapd_age_node(pgdat, &sc);
6886 nr_soft_reclaimed = mem_cgroup_soft_limit_reclaim(pgdat, sc.order,
6895 if (kswapd_shrink_node(pgdat, &sc))
6903 if (waitqueue_active(&pgdat->pfmemalloc_wait) &&
6904 allow_direct_reclaim(pgdat))
6905 wake_up_all(&pgdat->pfmemalloc_wait);
6944 pgdat->kswapd_failures++;
6947 clear_reclaim_active(pgdat, highest_zoneidx);
6958 zone = pgdat->node_zones + i;
6968 wakeup_kcompactd(pgdat, pageblock_order, highest_zoneidx);
6971 snapshot_refaults(NULL, pgdat);
6986 * The pgdat->kswapd_highest_zoneidx is used to pass the highest zone index to
6992 static enum zone_type kswapd_highest_zoneidx(pg_data_t *pgdat,
6995 enum zone_type curr_idx = READ_ONCE(pgdat->kswapd_highest_zoneidx);
7000 static void kswapd_try_to_sleep(pg_data_t *pgdat, int alloc_order, int reclaim_order,
7009 prepare_to_wait(&pgdat->kswapd_wait, &wait, TASK_INTERRUPTIBLE);
7018 if (prepare_kswapd_sleep(pgdat, reclaim_order, highest_zoneidx)) {
7025 reset_isolation_suitable(pgdat);
7031 wakeup_kcompactd(pgdat, alloc_order, highest_zoneidx);
7041 WRITE_ONCE(pgdat->kswapd_highest_zoneidx,
7042 kswapd_highest_zoneidx(pgdat,
7045 if (READ_ONCE(pgdat->kswapd_order) < reclaim_order)
7046 WRITE_ONCE(pgdat->kswapd_order, reclaim_order);
7049 finish_wait(&pgdat->kswapd_wait, &wait);
7050 prepare_to_wait(&pgdat->kswapd_wait, &wait, TASK_INTERRUPTIBLE);
7058 prepare_kswapd_sleep(pgdat, reclaim_order, highest_zoneidx)) {
7059 trace_mm_vmscan_kswapd_sleep(pgdat->node_id);
7069 set_pgdat_percpu_threshold(pgdat, calculate_normal_threshold);
7074 set_pgdat_percpu_threshold(pgdat, calculate_pressure_threshold);
7081 finish_wait(&pgdat->kswapd_wait, &wait);
7101 pg_data_t *pgdat = (pg_data_t *)p;
7103 const struct cpumask *cpumask = cpumask_of_node(pgdat->node_id);
7123 WRITE_ONCE(pgdat->kswapd_order, 0);
7124 WRITE_ONCE(pgdat->kswapd_highest_zoneidx, MAX_NR_ZONES);
7125 atomic_set(&pgdat->nr_writeback_throttled, 0);
7129 alloc_order = reclaim_order = READ_ONCE(pgdat->kswapd_order);
7130 highest_zoneidx = kswapd_highest_zoneidx(pgdat,
7134 kswapd_try_to_sleep(pgdat, alloc_order, reclaim_order,
7138 alloc_order = READ_ONCE(pgdat->kswapd_order);
7139 highest_zoneidx = kswapd_highest_zoneidx(pgdat,
7141 WRITE_ONCE(pgdat->kswapd_order, 0);
7142 WRITE_ONCE(pgdat->kswapd_highest_zoneidx, MAX_NR_ZONES);
7162 trace_mm_vmscan_kswapd_wake(pgdat->node_id, highest_zoneidx,
7164 reclaim_order = balance_pgdat(pgdat, alloc_order,
7178 * pgdat. It will wake up kcompactd after reclaiming memory. If kswapd reclaim
7185 pg_data_t *pgdat;
7194 pgdat = zone->zone_pgdat;
7195 curr_idx = READ_ONCE(pgdat->kswapd_highest_zoneidx);
7198 WRITE_ONCE(pgdat->kswapd_highest_zoneidx, highest_zoneidx);
7200 if (READ_ONCE(pgdat->kswapd_order) < order)
7201 WRITE_ONCE(pgdat->kswapd_order, order);
7203 if (!waitqueue_active(&pgdat->kswapd_wait))
7207 if (pgdat->kswapd_failures >= MAX_RECLAIM_RETRIES ||
7208 (pgdat_balanced(pgdat, order, highest_zoneidx) &&
7209 !pgdat_watermark_boosted(pgdat, highest_zoneidx))) {
7218 wakeup_kcompactd(pgdat, order, highest_zoneidx);
7222 trace_mm_vmscan_wakeup_kswapd(pgdat->node_id, highest_zoneidx, order,
7224 wake_up_interruptible(&pgdat->kswapd_wait);
7271 pg_data_t *pgdat = NODE_DATA(nid);
7273 pgdat_kswapd_lock(pgdat);
7274 if (!pgdat->kswapd) {
7275 pgdat->kswapd = kthread_run(kswapd, pgdat, "kswapd%d", nid);
7276 if (IS_ERR(pgdat->kswapd)) {
7279 nid, PTR_ERR(pgdat->kswapd));
7281 pgdat->kswapd = NULL;
7284 pgdat_kswapd_unlock(pgdat);
7293 pg_data_t *pgdat = NODE_DATA(nid);
7296 pgdat_kswapd_lock(pgdat);
7297 kswapd = pgdat->kswapd;
7300 pgdat->kswapd = NULL;
7302 pgdat_kswapd_unlock(pgdat);
7345 static inline unsigned long node_unmapped_file_pages(struct pglist_data *pgdat)
7347 unsigned long file_mapped = node_page_state(pgdat, NR_FILE_MAPPED);
7348 unsigned long file_lru = node_page_state(pgdat, NR_INACTIVE_FILE) +
7349 node_page_state(pgdat, NR_ACTIVE_FILE);
7360 static unsigned long node_pagecache_reclaimable(struct pglist_data *pgdat)
7372 nr_pagecache_reclaimable = node_page_state(pgdat, NR_FILE_PAGES);
7374 nr_pagecache_reclaimable = node_unmapped_file_pages(pgdat);
7378 delta += node_page_state(pgdat, NR_FILE_DIRTY);
7390 static int __node_reclaim(struct pglist_data *pgdat, gfp_t gfp_mask, unsigned int order)
7408 trace_mm_vmscan_node_reclaim_begin(pgdat->node_id, order,
7421 if (node_pagecache_reclaimable(pgdat) > pgdat->min_unmapped_pages ||
7422 node_page_state_pages(pgdat, NR_SLAB_RECLAIMABLE_B) > pgdat->min_slab_pages) {
7428 shrink_node(pgdat, &sc);
7443 int node_reclaim(struct pglist_data *pgdat, gfp_t gfp_mask, unsigned int order)
7457 if (node_pagecache_reclaimable(pgdat) <= pgdat->min_unmapped_pages &&
7458 node_page_state_pages(pgdat, NR_SLAB_RECLAIMABLE_B) <=
7459 pgdat->min_slab_pages)
7474 if (node_state(pgdat->node_id, N_CPU) && pgdat->node_id != numa_node_id())
7477 if (test_and_set_bit(PGDAT_RECLAIM_LOCKED, &pgdat->flags))
7480 ret = __node_reclaim(pgdat, gfp_mask, order);
7481 clear_bit(PGDAT_RECLAIM_LOCKED, &pgdat->flags);
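The matches above are consistent with mm/vmscan.c and keep repeating a handful of pgdat access patterns: reading per-node counters with node_page_state(), walking pgdat->node_zones, and comparing pgdat->kswapd_failures against MAX_RECLAIM_RETRIES. The sketch below is a hypothetical helper (node_looks_reclaimable() is not part of the kernel) that only illustrates those patterns; the APIs it calls (node_page_state(), managed_zone(), NODE_DATA(), MAX_NR_ZONES, MAX_RECLAIM_RETRIES) are the real ones appearing in the listed lines.

/*
 * Hypothetical example, not kernel code: shows the pgdat idioms used by
 * skip_throttle_noprogress(), too_many_isolated() and balance_pgdat()
 * in the listing above. Would live under mm/ so that "internal.h"
 * (which defines MAX_RECLAIM_RETRIES) is reachable, as vmscan.c does.
 */
#include <linux/mmzone.h>
#include <linux/vmstat.h>

#include "internal.h"

static bool node_looks_reclaimable(struct pglist_data *pgdat)
{
	unsigned long inactive;
	int i;

	/* Same guard as skip_throttle_noprogress()/allow_direct_reclaim(). */
	if (pgdat->kswapd_failures >= MAX_RECLAIM_RETRIES)
		return false;

	/* Per-node LRU counters, as read in too_many_isolated(). */
	inactive = node_page_state(pgdat, NR_INACTIVE_FILE) +
		   node_page_state(pgdat, NR_INACTIVE_ANON);
	if (!inactive)
		return false;

	/* Walk the node's zones, as balance_pgdat() and friends do. */
	for (i = 0; i < MAX_NR_ZONES; i++) {
		struct zone *zone = pgdat->node_zones + i;

		if (managed_zone(zone))
			return true;
	}

	return false;
}

A caller would typically obtain the pgdat either via NODE_DATA(nid) or from a folio/lruvec (folio_pgdat(), lruvec_pgdat()), exactly as the matched lines do.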