/linux-master/arch/mips/loongson64/numa.c
    207: void arch_refresh_nodedata(int nid, pg_data_t *pgdat)  [argument]
    209: __node_data[nid] = pgdat;
/linux-master/arch/mips/sgi-ip27/ip27-memory.c
    432: void arch_refresh_nodedata(int nid, pg_data_t *pgdat)  [argument]
    434: __node_data[nid] = (struct node_data *)pgdat;
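The two MIPS entries above are architecture overrides of arch_refresh_nodedata(); the generic fallback in include/linux/memory_hotplug.h (further down in this listing) just stores the pointer in node_data[]. A minimal sketch of such an override, assuming a hypothetical per-arch my_node_data[] array:

    #include <linux/memory_hotplug.h>
    #include <linux/mmzone.h>
    #include <linux/numa.h>

    /* Hypothetical array; real architectures keep their own node_data storage. */
    extern pg_data_t *my_node_data[MAX_NUMNODES];

    /* Publish the pgdat allocated for a hot-added node, as the MIPS code above does. */
    void arch_refresh_nodedata(int nid, pg_data_t *pgdat)
    {
        my_node_data[nid] = pgdat;
    }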
/linux-master/arch/sh/mm/init.c
    220: panic("Can't allocate pgdat for node %d\n", nid);
    345: pg_data_t *pgdat;  [local]
    348: for_each_online_pgdat(pgdat)
    350: __va(pgdat_end_pfn(pgdat) << PAGE_SHIFT));
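arch/sh walks every online node's pgdat during init. A self-contained sketch of the same iteration, using for_each_online_pgdat() (also used by kernel/sched/core.c below) and pgdat_end_pfn() from include/linux/mmzone.h; the function name here is made up:

    #include <linux/init.h>
    #include <linux/mmzone.h>
    #include <linux/printk.h>

    /* Report the PFN range spanned by each online node. */
    static void __init dump_node_spans(void)
    {
        struct pglist_data *pgdat;

        for_each_online_pgdat(pgdat)
            pr_info("node %d: pfn [%lu, %lu)\n", pgdat->node_id,
                    pgdat->node_start_pfn, pgdat_end_pfn(pgdat));
    }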
/linux-master/drivers/base/memory.c
    708: pg_data_t *pgdat = NODE_DATA(nid);  [local]
    720: zone = pgdat->node_zones + i;
/linux-master/drivers/base/node.c
    375: struct pglist_data *pgdat = NODE_DATA(nid);  [local]
    381: sreclaimable = node_page_state_pages(pgdat, NR_SLAB_RECLAIMABLE_B);
    382: sunreclaimable = node_page_state_pages(pgdat, NR_SLAB_UNRECLAIMABLE_B);
    384: swapcached = node_page_state_pages(pgdat, NR_SWAPCACHE);
    403: nid, K(node_page_state(pgdat, NR_ACTIVE_ANON) +
    404: node_page_state(pgdat, NR_ACTIVE_FILE)),
    405: nid, K(node_page_state(pgdat, NR_INACTIVE_ANON) +
    406: node_page_state(pgdat, NR_INACTIVE_FILE)),
    407: nid, K(node_page_state(pgdat, NR_ACTIVE_ANON)),
    408: nid, K(node_page_state(pgdat, NR_INACTIVE_ANON)),
    520: struct pglist_data *pgdat = NODE_DATA(nid);  [local]
    ...
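drivers/base/node.c builds the per-node meminfo sysfs file from these node_page_state() reads. A hedged sketch of the same pattern; the helper name is invented, the accessors are the ones listed above:

    #include <linux/mmzone.h>
    #include <linux/vmstat.h>

    /* Hypothetical helper: file-backed LRU pages on one node, in KiB. */
    static unsigned long node_file_lru_kb(int nid)
    {
        struct pglist_data *pgdat = NODE_DATA(nid);
        unsigned long pages;

        pages = node_page_state(pgdat, NR_ACTIVE_FILE) +
                node_page_state(pgdat, NR_INACTIVE_FILE);

        return pages << (PAGE_SHIFT - 10);      /* pages -> KiB, like node.c's K() */
    }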
/linux-master/drivers/gpu/drm/amd/amdgpu/amdgpu_acpi.c
    853: pg_data_t *pgdat = NODE_DATA(nid);  [local]
    857: zone_managed_pages(&pgdat->node_zones[zone_type]);
/linux-master/drivers/gpu/drm/amd/amdkfd/kfd_crat.c
    1753: pg_data_t *pgdat;  [local]
    1773: pgdat = NODE_DATA(numa_node_id);
    1775: mem_in_bytes += zone_managed_pages(&pgdat->node_zones[zone_type]);
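Both AMD entries above size a node's memory by summing zone_managed_pages() over the node's zones. A minimal sketch of that loop; node_managed_bytes() is an invented name:

    #include <linux/mmzone.h>
    #include <linux/types.h>

    /* Hypothetical helper: total managed memory on a NUMA node, in bytes. */
    static u64 node_managed_bytes(int nid)
    {
        pg_data_t *pgdat = NODE_DATA(nid);
        u64 pages = 0;
        int zone_type;

        for (zone_type = 0; zone_type < MAX_NR_ZONES; zone_type++)
            pages += zone_managed_pages(&pgdat->node_zones[zone_type]);

        return pages << PAGE_SHIFT;
    }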
/linux-master/include/linux/bootmem_info.h
    21: void __init register_page_bootmem_info_node(struct pglist_data *pgdat);
    48: static inline void register_page_bootmem_info_node(struct pglist_data *pgdat)  [argument]
/linux-master/include/linux/compaction.h
    91: extern void reset_isolation_suitable(pg_data_t *pgdat);
    103: extern void wakeup_kcompactd(pg_data_t *pgdat, int order, int highest_zoneidx);
    106: static inline void reset_isolation_suitable(pg_data_t *pgdat)  [argument]
    123: static inline void wakeup_kcompactd(pg_data_t *pgdat,  [argument]
/linux-master/include/linux/memcontrol.h
    59: pg_data_t *pgdat;  [member of struct mem_cgroup_reclaim_cookie]
    731: * @pgdat: pglist_data
    734: * @pgdat combination. This can be the node lruvec, if the memory
    738: struct pglist_data *pgdat)
    744: lruvec = &pgdat->__lruvec;
    751: mz = memcg->nodeinfo[pgdat->node_id];
    756: * we have to be prepared to initialize lruvec->pgdat here;
    759: if (unlikely(lruvec->pgdat != pgdat))
    760: lruvec->pgdat = pgdat;
    737: mem_cgroup_lruvec(struct mem_cgroup *memcg, struct pglist_data *pgdat)  [argument]
    1314: mem_cgroup_lruvec(struct mem_cgroup *memcg, struct pglist_data *pgdat)  [argument]
    1322: struct pglist_data *pgdat = folio_pgdat(folio);  [local]
    1378: struct pglist_data *pgdat = folio_pgdat(folio);  [local]
    1386: struct pglist_data *pgdat = folio_pgdat(folio);  [local]
    1395: struct pglist_data *pgdat = folio_pgdat(folio);  [local]
    1627: mem_cgroup_soft_limit_reclaim(pg_data_t *pgdat, int order, gfp_t gfp_mask, unsigned long *total_scanned)  [argument]
    ...
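mem_cgroup_lruvec() above resolves the LRU vector for a (memcg, node) pair, falling back to the node's own pgdat->__lruvec when memcg is disabled. A sketch of a typical caller; the helper name is invented, and lruvec_page_state() is assumed from the same header:

    #include <linux/memcontrol.h>
    #include <linux/mmzone.h>

    /* Hypothetical helper: anonymous LRU pages charged to @memcg on node @nid. */
    static unsigned long memcg_node_anon_pages(struct mem_cgroup *memcg, int nid)
    {
        struct lruvec *lruvec = mem_cgroup_lruvec(memcg, NODE_DATA(nid));

        return lruvec_page_state(lruvec, NR_ACTIVE_ANON) +
               lruvec_page_state(lruvec, NR_INACTIVE_ANON);
    }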
/linux-master/include/linux/memory-tiers.h
    53: void node_get_allowed_targets(pg_data_t *pgdat, nodemask_t *targets);
    61: static inline void node_get_allowed_targets(pg_data_t *pgdat, nodemask_t *targets)  [argument]
    104: static inline void node_get_allowed_targets(pg_data_t *pgdat, nodemask_t *targets)  [argument]
/linux-master/include/linux/memory_hotplug.h
    21: * For supporting node-hotadd, we have to allocate a new pgdat.
    30: extern void arch_refresh_nodedata(int nid, pg_data_t *pgdat);
    39: * Because, pgdat for the new node is not allocated/initialized yet itself.
    44: memblock_alloc(sizeof(*pgdat), SMP_CACHE_BYTES); \
    48: static inline void arch_refresh_nodedata(int nid, pg_data_t *pgdat)  [argument]
    50: node_data[nid] = pgdat;
    61: static inline void arch_refresh_nodedata(int nid, pg_data_t *pgdat)  [argument]
    230: static inline void pgdat_kswapd_lock(pg_data_t *pgdat)  [argument]
    232: mutex_lock(&pgdat->kswapd_lock);
    235: static inline void pgdat_kswapd_unlock(pg_data_t *pgdat)  [argument]
    240: pgdat_kswapd_lock_init(pg_data_t *pgdat)  [argument]
    287: pgdat_kswapd_lock(pg_data_t *pgdat)  [argument]
    288: pgdat_kswapd_unlock(pg_data_t *pgdat)  [argument]
    289: pgdat_kswapd_lock_init(pg_data_t *pgdat)  [argument]
    304: pgdat_resize_lock(struct pglist_data *pgdat, unsigned long *flags)  [argument]
    309: pgdat_resize_unlock(struct pglist_data *pgdat, unsigned long *flags)  [argument]
    314: pgdat_resize_init(struct pglist_data *pgdat)  [argument]
    324: pgdat_resize_init(struct pglist_data *pgdat)  [argument]
    ...
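The pgdat_kswapd_lock()/pgdat_kswapd_unlock() pair above guards pgdat->kswapd; kswapd_is_running() in mm/compaction.c (listed below) is one user. A reduced sketch of that locking pattern, assuming the kswapd task pointer lives in struct pglist_data as in current kernels:

    #include <linux/memory_hotplug.h>
    #include <linux/mmzone.h>

    /* Sketch: does this node currently have a kswapd thread? */
    static bool node_has_kswapd(pg_data_t *pgdat)
    {
        bool running;

        pgdat_kswapd_lock(pgdat);
        running = pgdat->kswapd != NULL;
        pgdat_kswapd_unlock(pgdat);

        return running;
    }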
/linux-master/include/linux/mm_inline.h
    42: struct pglist_data *pgdat = lruvec_pgdat(lruvec);  [local]
    48: __mod_zone_page_state(&pgdat->node_zones[zid],
/linux-master/include/linux/mmzone.h
    555: void lru_gen_init_pgdat(struct pglist_data *pgdat);
    568: static inline void lru_gen_init_pgdat(struct pglist_data *pgdat)  [argument]
    632: struct pglist_data *pgdat;  [member of struct lruvec]
    1301: * Also synchronizes pgdat->first_deferred_pfn during deferred page
    1423: static inline unsigned long pgdat_end_pfn(pg_data_t *pgdat)  [argument]
    1425: return pgdat->node_start_pfn + pgdat->node_spanned_pages;
    1430: void build_all_zonelists(pg_data_t *pgdat);
    1458: return lruvec->pgdat;
    1572: extern struct pglist_data *next_online_pgdat(struct pglist_data *pgdat);
    ...
/linux-master/include/linux/node.h
    131: struct pglist_data *pgdat = NODE_DATA(nid);  [local]
    132: unsigned long start_pfn = pgdat->node_start_pfn;
    133: unsigned long end_pfn = start_pfn + pgdat->node_spanned_pages;
/linux-master/include/linux/page_ext.h
    58: extern void pgdat_page_ext_init(struct pglist_data *pgdat);
    105: static inline void pgdat_page_ext_init(struct pglist_data *pgdat)  [argument]
/linux-master/include/linux/page_owner.h
    20: pg_data_t *pgdat, struct zone *zone);
/linux-master/include/linux/swap.h
    403: pg_data_t *pgdat,
/linux-master/include/linux/vmstat.h
    172: static inline void node_page_state_add(long x, struct pglist_data *pgdat,  [argument]
    175: atomic_long_add(x, &pgdat->vm_stat[item]);
    261: extern unsigned long node_page_state(struct pglist_data *pgdat,
    263: extern unsigned long node_page_state_pages(struct pglist_data *pgdat,
    311: void set_pgdat_percpu_threshold(pg_data_t *pgdat,
    325: static inline void __mod_node_page_state(struct pglist_data *pgdat,  [argument]
    339: node_page_state_add(delta, pgdat, item);
    348: static inline void __inc_node_state(struct pglist_data *pgdat, enum node_stat_item item)  [argument]
    350: atomic_long_inc(&pgdat->vm_stat[item]);
    360: static inline void __dec_node_state(struct pglist_data *pgdat, enum node_stat_item item)  [argument]
    ...
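The vmstat.h accessors above come in two flavours: the __-prefixed variants assume preemption/interrupts are already disabled, while mod_node_page_state() and friends are safe from any context. A small sketch of the usual update-then-read pattern; the function name is illustrative only:

    #include <linux/mmzone.h>
    #include <linux/vmstat.h>

    /* Account @nr_pages as isolated anonymous pages on @pgdat and return
     * the node-wide total afterwards. */
    static unsigned long account_isolated_anon(struct pglist_data *pgdat, long nr_pages)
    {
        mod_node_page_state(pgdat, NR_ISOLATED_ANON, nr_pages);
        return node_page_state(pgdat, NR_ISOLATED_ANON);
    }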
/linux-master/include/linux/writeback.h
    339: bool node_dirty_ok(struct pglist_data *pgdat);
/linux-master/kernel/sched/core.c
    4597: struct pglist_data *pgdat;  [local]
    4599: for_each_online_pgdat(pgdat) {
    4600: pgdat->nbp_threshold = 0;
    4601: pgdat->nbp_th_nr_cand = node_page_state(pgdat, PGPROMOTE_CANDIDATE);
    4602: pgdat->nbp_th_start = jiffies_to_msecs(jiffies);
/linux-master/kernel/sched/fair.c
    1737: static bool pgdat_free_space_enough(struct pglist_data *pgdat)  [argument]
    1743: pgdat->node_present_pages >> 4);
    1744: for (z = pgdat->nr_zones - 1; z >= 0; z--) {
    1745: struct zone *zone = pgdat->node_zones + z;
    1785: static bool numa_promotion_rate_limit(struct pglist_data *pgdat,  [argument]
    1792: mod_node_page_state(pgdat, PGPROMOTE_CANDIDATE, nr);
    1793: nr_cand = node_page_state(pgdat, PGPROMOTE_CANDIDATE);
    1794: start = pgdat->nbp_rl_start;
    1796: cmpxchg(&pgdat->nbp_rl_start, start, now) == start)
    1797: pgdat ...
    1805: numa_promotion_adjust_threshold(struct pglist_data *pgdat, unsigned long rate_limit, unsigned int ref_th)  [argument]
    1851: struct pglist_data *pgdat;  [local]
    ...
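pgdat_free_space_enough() above decides whether a node can absorb NUMA-balancing promotions by walking its populated zones from the top and testing a watermark. A simplified sketch of that check, using the ordinary high watermark instead of the promotion watermark the scheduler code uses:

    #include <linux/mmzone.h>

    /* Simplified: does any populated zone on @pgdat sit above its high
     * watermark plus @extra_pages of headroom? */
    static bool node_has_free_headroom(struct pglist_data *pgdat, unsigned long extra_pages)
    {
        int z;

        for (z = pgdat->nr_zones - 1; z >= 0; z--) {
            struct zone *zone = pgdat->node_zones + z;

            if (!populated_zone(zone))
                continue;

            if (zone_watermark_ok(zone, 0, high_wmark_pages(zone) + extra_pages,
                                  ZONE_MOVABLE, 0))
                return true;
        }
        return false;
    }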
/linux-master/mm/bootmem_info.c
    102: void __init register_page_bootmem_info_node(struct pglist_data *pgdat)  [argument]
    105: int node = pgdat->node_id;
    109: page = virt_to_page(pgdat);
    114: pfn = pgdat->node_start_pfn;
    115: end_pfn = pgdat_end_pfn(pgdat);
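register_page_bootmem_info_node() above tags the boot-time allocations backing a node (the pgdat itself and the node's memmap) so memory hot-remove can recognise them; the bootmem_info.h stub makes it a no-op on other configs. The caller side is a per-node loop in arch init code, sketched here after the x86 pattern:

    #include <linux/bootmem_info.h>
    #include <linux/init.h>
    #include <linux/mmzone.h>
    #include <linux/nodemask.h>

    /* Register bootmem info for every online node during init. */
    static void __init register_all_bootmem_info(void)
    {
        int nid;

        for_each_online_node(nid)
            register_page_bootmem_info_node(NODE_DATA(nid));
    }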
/linux-master/mm/compaction.c
    453: void reset_isolation_suitable(pg_data_t *pgdat)  [argument]
    458: struct zone *zone = &pgdat->node_zones[zoneid];
    817: pg_data_t *pgdat = cc->zone->zone_pgdat;  [local]
    822: inactive = node_page_state(pgdat, NR_INACTIVE_FILE) +
    823: node_page_state(pgdat, NR_INACTIVE_ANON);
    824: active = node_page_state(pgdat, NR_ACTIVE_FILE) +
    825: node_page_state(pgdat, NR_ACTIVE_ANON);
    826: isolated = node_page_state(pgdat, NR_ISOLATED_FILE) +
    827: node_page_state(pgdat, NR_ISOLATED_ANON);
    842: wake_throttle_isolated(pgdat);
    894: pg_data_t *pgdat = cc->zone->zone_pgdat;  [local]
    2203: kswapd_is_running(pg_data_t *pgdat)  [argument]
    2248: fragmentation_score_node(pg_data_t *pgdat)  [argument]
    2278: should_proactive_compact_node(pg_data_t *pgdat)  [argument]
    2317: pg_data_t *pgdat;  [local]
    2900: compact_node(pg_data_t *pgdat, bool proactive)  [argument]
    2964: pg_data_t *pgdat = NODE_DATA(nid);  [local]
    3030: kcompactd_work_requested(pg_data_t *pgdat)  [argument]
    3036: kcompactd_node_suitable(pg_data_t *pgdat)  [argument]
    3059: kcompactd_do_work(pg_data_t *pgdat)  [argument]
    3137: wakeup_kcompactd(pg_data_t *pgdat, int order, int highest_zoneidx)  [argument]
    3169: pg_data_t *pgdat = (pg_data_t *)p;  [local]
    3244: pg_data_t *pgdat = NODE_DATA(nid);  [local]
    3281: pg_data_t *pgdat = NODE_DATA(nid);  [local]
    ...
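wakeup_kcompactd() above is how reclaim pokes a node's kcompactd thread when allocations of a given order may soon need compaction; the compaction.h stub makes it a no-op without CONFIG_COMPACTION. A minimal, illustrative call site (in the kernel the caller is normally kswapd):

    #include <linux/compaction.h>
    #include <linux/mmzone.h>

    /* Illustrative only: ask node @nid's kcompactd to prepare order-@order
     * pages, considering zones up to ZONE_NORMAL. */
    static void poke_kcompactd(int nid, int order)
    {
        wakeup_kcompactd(NODE_DATA(nid), order, ZONE_NORMAL);
    }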
/linux-master/mm/huge_memory.c
    774: struct pglist_data *pgdat = NODE_DATA(folio_nid(folio));  [local]
    779: return &pgdat->deferred_split_queue;
    785: struct pglist_data *pgdat = NODE_DATA(folio_nid(folio));  [local]
    787: return &pgdat->deferred_split_queue;