Searched refs:pgdat (Results 1 - 25 of 47) sorted by last modified time


/linux-master/kernel/sched/
fair.c:1737 static bool pgdat_free_space_enough(struct pglist_data *pgdat) argument
1743 pgdat->node_present_pages >> 4);
1744 for (z = pgdat->nr_zones - 1; z >= 0; z--) {
1745 struct zone *zone = pgdat->node_zones + z;
1785 static bool numa_promotion_rate_limit(struct pglist_data *pgdat, argument
1792 mod_node_page_state(pgdat, PGPROMOTE_CANDIDATE, nr);
1793 nr_cand = node_page_state(pgdat, PGPROMOTE_CANDIDATE);
1794 start = pgdat->nbp_rl_start;
1796 cmpxchg(&pgdat->nbp_rl_start, start, now) == start)
1797 pgdat->nbp_rl_nr_cand = nr_cand;
1805 numa_promotion_adjust_threshold(struct pglist_data *pgdat, unsigned long rate_limit, unsigned int ref_th) argument
1851 struct pglist_data *pgdat; local
[all...]
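The fair.c matches above implement NUMA balancing's promotion rate limit: candidate pages are counted against a one-second window whose baseline a single cmpxchg winner rebases. A minimal userspace model of that windowed check (struct and field names here are hypothetical, not the kernel's):

    #include <stdatomic.h>
    #include <stdbool.h>

    /* Hypothetical model: promotion is throttled once more than
     * rate_limit candidate pages have been seen since the current
     * one-second window began. */
    struct rl_state {
        _Atomic unsigned int window_start;  /* ms timestamp of window start */
        unsigned long window_base_count;    /* candidate total at window start */
    };

    static bool rate_limited(struct rl_state *st, unsigned long total_candidates,
                             unsigned long rate_limit, unsigned int now_ms)
    {
        unsigned int start = atomic_load(&st->window_start);

        /* Once a second, exactly one caller wins the compare-exchange
         * and rebases the window, mirroring the kernel's cmpxchg. */
        if (now_ms - start > 1000 &&
            atomic_compare_exchange_strong(&st->window_start, &start, now_ms))
            st->window_base_count = total_candidates;

        return total_candidates - st->window_base_count >= rate_limit;
    }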
core.c:4597 struct pglist_data *pgdat; local
4599 for_each_online_pgdat(pgdat) {
4600 pgdat->nbp_threshold = 0;
4601 pgdat->nbp_th_nr_cand = node_page_state(pgdat, PGPROMOTE_CANDIDATE);
4602 pgdat->nbp_th_start = jiffies_to_msecs(jiffies);
/linux-master/mm/
page_owner.c:419 pg_data_t *pgdat, struct zone *zone)
495 seq_printf(m, "Node %d, zone %8s ", pgdat->node_id, zone->name);
765 static void init_pages_in_zone(pg_data_t *pgdat, struct zone *zone) argument
832 pgdat->node_id, zone->name, count);
835 static void init_zones_in_node(pg_data_t *pgdat) argument
838 struct zone *node_zones = pgdat->node_zones;
844 init_pages_in_zone(pgdat, zone);
850 pg_data_t *pgdat; local
852 for_each_online_pgdat(pgdat)
853 init_zones_in_node(pgdat);
418 pagetypeinfo_showmixedcount_print(struct seq_file *m, pg_data_t *pgdat, struct zone *zone) argument
[all...]
internal.h:179 void __acct_reclaim_writeback(pg_data_t *pgdat, struct folio *folio,
183 pg_data_t *pgdat = folio_pgdat(folio); local
184 int nr_throttled = atomic_read(&pgdat->nr_writeback_throttled);
187 __acct_reclaim_writeback(pgdat, folio, nr_throttled);
190 static inline void wake_throttle_isolated(pg_data_t *pgdat) argument
194 wqh = &pgdat->reclaim_wait[VMSCAN_THROTTLE_ISOLATED];
299 extern void reclaim_throttle(pg_data_t *pgdat, enum vmscan_throttle_state reason);
936 static inline int node_reclaim(struct pglist_data *pgdat, gfp_t mask, argument
huge_memory.c:774 struct pglist_data *pgdat = NODE_DATA(folio_nid(folio)); local
779 return &pgdat->deferred_split_queue;
785 struct pglist_data *pgdat = NODE_DATA(folio_nid(folio)); local
787 return &pgdat->deferred_split_queue;
workingset.c:199 static void *pack_shadow(int memcgid, pg_data_t *pgdat, unsigned long eviction, argument
204 eviction = (eviction << NODES_SHIFT) | pgdat->node_id;
210 static void unpack_shadow(void *shadow, int *memcgidp, pg_data_t **pgdat, argument
225 *pgdat = NODE_DATA(nid);
244 struct pglist_data *pgdat = folio_pgdat(folio); local
248 lruvec = mem_cgroup_lruvec(memcg, pgdat);
256 return pack_shadow(mem_cgroup_id(memcg), pgdat, token, refs);
269 struct pglist_data *pgdat; local
271 unpack_shadow(shadow, &memcg_id, &pgdat, token, workingset);
274 *lruvec = mem_cgroup_lruvec(memcg, pgdat);
384 struct pglist_data *pgdat = folio_pgdat(folio); local
426 struct pglist_data *pgdat; local
533 struct pglist_data *pgdat; local
[all...]
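pack_shadow()/unpack_shadow() above squeeze the evicting node id and an eviction counter into a single word stored in the page cache slot a folio vacated. A self-contained sketch of the same bit-packing; the 10-bit node field is an assumed width standing in for the kernel's NODES_SHIFT:

    #include <assert.h>
    #include <stdio.h>

    #define NODE_BITS 10  /* assumed width of the node id field */

    /* Pack a node id and an eviction counter into one word; the
     * counter occupies the high bits and may silently wrap, which is
     * tolerable for a refault-distance heuristic. */
    static unsigned long pack_shadow(int nid, unsigned long eviction)
    {
        return (eviction << NODE_BITS) | (unsigned long)nid;
    }

    static void unpack_shadow(unsigned long shadow, int *nid, unsigned long *eviction)
    {
        *nid = (int)(shadow & ((1UL << NODE_BITS) - 1));
        *eviction = shadow >> NODE_BITS;
    }

    int main(void)
    {
        unsigned long shadow = pack_shadow(3, 123456);
        int nid;
        unsigned long ev;

        unpack_shadow(shadow, &nid, &ev);
        assert(nid == 3 && ev == 123456);
        printf("node %d, eviction %lu\n", nid, ev);
        return 0;
    }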
vmscan.c:466 static bool skip_throttle_noprogress(pg_data_t *pgdat) argument
475 if (pgdat->kswapd_failures >= MAX_RECLAIM_RETRIES)
484 struct zone *zone = pgdat->node_zones + i;
499 void reclaim_throttle(pg_data_t *pgdat, enum vmscan_throttle_state reason) argument
501 wait_queue_head_t *wqh = &pgdat->reclaim_wait[reason];
530 if (atomic_inc_return(&pgdat->nr_writeback_throttled) == 1) {
531 WRITE_ONCE(pgdat->nr_reclaim_start,
532 node_page_state(pgdat, NR_THROTTLED_WRITTEN));
539 if (skip_throttle_noprogress(pgdat)) {
561 atomic_dec(&pgdat->nr_writeback_throttled);
573 __acct_reclaim_writeback(pg_data_t *pgdat, struct folio *folio, int nr_throttled) argument
954 demote_folio_list(struct list_head *demote_folios, struct pglist_data *pgdat) argument
1011 shrink_folio_list(struct list_head *folio_list, struct pglist_data *pgdat, struct scan_control *sc, struct reclaim_stat *stat, bool ignore_references) argument
1754 too_many_isolated(struct pglist_data *pgdat, int file, struct scan_control *sc) argument
1891 struct pglist_data *pgdat = lruvec_pgdat(lruvec); local
2012 struct pglist_data *pgdat = lruvec_pgdat(lruvec); local
2093 reclaim_folio_list(struct list_head *folio_list, struct pglist_data *pgdat, bool ignore_references) argument
2220 prepare_scan_control(pg_data_t *pgdat, struct scan_control *sc) argument
2337 struct pglist_data *pgdat = lruvec_pgdat(lruvec); local
2535 can_age_anon_pages(struct pglist_data *pgdat, struct scan_control *sc) argument
2591 struct pglist_data *pgdat = NODE_DATA(nid); local
2612 struct pglist_data *pgdat = lruvec_pgdat(lruvec); local
2762 struct pglist_data *pgdat = lruvec_pgdat(walk->lruvec); local
3299 get_pfn_folio(unsigned long pfn, struct mem_cgroup *memcg, struct pglist_data *pgdat, bool can_swap) argument
3341 struct pglist_data *pgdat = lruvec_pgdat(walk->lruvec); local
3409 struct pglist_data *pgdat = lruvec_pgdat(walk->lruvec); local
3516 struct pglist_data *pgdat = lruvec_pgdat(walk->lruvec); local
3650 set_mm_walk(struct pglist_data *pgdat, bool force_alloc) argument
3938 lru_gen_age_node(struct pglist_data *pgdat, struct scan_control *sc) argument
4001 struct pglist_data *pgdat = folio_pgdat(folio); local
4109 struct pglist_data *pgdat = lruvec_pgdat(lruvec); local
4158 struct pglist_data *pgdat = NODE_DATA(nid); local
4193 struct pglist_data *pgdat = NODE_DATA(nid); local
4521 struct pglist_data *pgdat = lruvec_pgdat(lruvec); local
4757 struct pglist_data *pgdat = lruvec_pgdat(lruvec); local
4793 shrink_many(struct pglist_data *pgdat, struct scan_control *sc) argument
4884 set_initial_priority(struct pglist_data *pgdat, struct scan_control *sc) argument
4906 lru_gen_shrink_node(struct pglist_data *pgdat, struct scan_control *sc) argument
5533 lru_gen_init_pgdat(struct pglist_data *pgdat) argument
5624 lru_gen_age_node(struct pglist_data *pgdat, struct scan_control *sc) argument
5634 lru_gen_shrink_node(struct pglist_data *pgdat, struct scan_control *sc) argument
5777 should_continue_reclaim(struct pglist_data *pgdat, unsigned long nr_reclaimed, struct scan_control *sc) argument
5829 shrink_node_memcgs(pg_data_t *pgdat, struct scan_control *sc) argument
5887 shrink_node(pg_data_t *pgdat, struct scan_control *sc) argument
6035 consider_reclaim_throttle(pg_data_t *pgdat, struct scan_control *sc) argument
6165 snapshot_refaults(struct mem_cgroup *target_memcg, pg_data_t *pgdat) argument
6285 allow_direct_reclaim(pg_data_t *pgdat) argument
6339 pg_data_t *pgdat = NULL; local
6460 mem_cgroup_shrink_node(struct mem_cgroup *memcg, gfp_t gfp_mask, bool noswap, pg_data_t *pgdat, unsigned long *nr_scanned) argument
6539 kswapd_age_node(struct pglist_data *pgdat, struct scan_control *sc) argument
6565 pgdat_watermark_boosted(pg_data_t *pgdat, int highest_zoneidx) argument
6593 pgdat_balanced(pg_data_t *pgdat, int order, int highest_zoneidx) argument
6629 clear_pgdat_congested(pg_data_t *pgdat) argument
6645 prepare_kswapd_sleep(pg_data_t *pgdat, int order, int highest_zoneidx) argument
6684 kswapd_shrink_node(pg_data_t *pgdat, struct scan_control *sc) argument
6721 update_reclaim_active(pg_data_t *pgdat, int highest_zoneidx, bool active) argument
6740 set_reclaim_active(pg_data_t *pgdat, int highest_zoneidx) argument
6746 clear_reclaim_active(pg_data_t *pgdat, int highest_zoneidx) argument
6764 balance_pgdat(pg_data_t *pgdat, int order, int highest_zoneidx) argument
6992 kswapd_highest_zoneidx(pg_data_t *pgdat, enum zone_type prev_highest_zoneidx) argument
7000 kswapd_try_to_sleep(pg_data_t *pgdat, int alloc_order, int reclaim_order, unsigned int highest_zoneidx) argument
7101 pg_data_t *pgdat = (pg_data_t *)p; local
7185 pg_data_t *pgdat; local
7271 pg_data_t *pgdat = NODE_DATA(nid); local
7293 pg_data_t *pgdat = NODE_DATA(nid); local
7345 node_unmapped_file_pages(struct pglist_data *pgdat) argument
7360 node_pagecache_reclaimable(struct pglist_data *pgdat) argument
7390 __node_reclaim(struct pglist_data *pgdat, gfp_t gfp_mask, unsigned int order) argument
7443 node_reclaim(struct pglist_data *pgdat, gfp_t gfp_mask, unsigned int order) argument
[all...]
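Among the vmscan.c matches, pgdat_balanced() (line 6593) is the predicate kswapd uses to decide the node needs no more reclaim: it counts as balanced once at least one eligible zone sits above its high watermark. A simplified standalone version of that check, using toy types rather than the kernel's:

    #include <stdbool.h>

    struct toy_zone {
        bool managed;
        unsigned long free_pages;
        unsigned long high_wmark;
    };

    struct toy_node {
        int nr_zones;
        struct toy_zone zones[4];
    };

    /* kswapd-style balance check: balanced when any eligible zone up
     * to highest_zoneidx sits above its high watermark. */
    static bool node_balanced(const struct toy_node *node, int highest_zoneidx)
    {
        for (int z = 0; z <= highest_zoneidx && z < node->nr_zones; z++) {
            const struct toy_zone *zone = &node->zones[z];

            if (!zone->managed)
                continue;
            if (zone->free_pages >= zone->high_wmark)
                return true;
        }
        return false;
    }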
sparse.c:321 static inline phys_addr_t pgdat_to_phys(struct pglist_data *pgdat) argument
324 VM_BUG_ON(pgdat != &contig_page_data);
327 return __pa(pgdat);
332 sparse_early_usemaps_alloc_pgdat_section(struct pglist_data *pgdat, argument
342 * a pgdat can prevent a section being removed. If section A
343 * contains a pgdat and section B contains the usemap, both
345 * from the same section as the pgdat where possible to avoid
348 goal = pgdat_to_phys(pgdat) & (PAGE_SECTION_MASK << PAGE_SHIFT);
366 struct pglist_data *pgdat = NODE_DATA(nid); local
376 pgdat_snr = pfn_to_section_nr(pgdat_to_phys(pgdat) >> PAGE_SHIFT);
404 sparse_early_usemaps_alloc_pgdat_section(struct pglist_data *pgdat, unsigned long size) argument
[all...]
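The comment in sparse_early_usemaps_alloc_pgdat_section() explains why the usemap is allocated from the same memory section as the pgdat: otherwise hot-removing one section could be blocked by a cross-reference from another. A toy model of the section-number comparison performed around line 376; the 128 MiB section size is an assumption matching x86-64's SECTION_SIZE_BITS of 27:

    #include <stdio.h>

    #define SECTION_SHIFT 27   /* assumed: 128 MiB hotplug sections */

    /* Section number of a physical address: which hot-removable unit
     * it lives in. */
    static unsigned long section_nr(unsigned long long phys)
    {
        return (unsigned long)(phys >> SECTION_SHIFT);
    }

    int main(void)
    {
        unsigned long long pgdat_phys  = 0x100000000ULL; /* hypothetical */
        unsigned long long usemap_phys = 0x100008000ULL; /* hypothetical */

        /* When pgdat and usemap share a section, no other section has
         * to stay online just to keep this pair reachable. */
        if (section_nr(pgdat_phys) != section_nr(usemap_phys))
            printf("usemap crosses sections: %lu vs %lu\n",
                   section_nr(usemap_phys), section_nr(pgdat_phys));
        else
            printf("usemap colocated with pgdat in section %lu\n",
                   section_nr(pgdat_phys));
        return 0;
    }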
page_alloc.c:4965 static int build_zonerefs_node(pg_data_t *pgdat, struct zoneref *zonerefs) argument
4973 zone = pgdat->node_zones + zone_type;
5083 static void build_zonelists_in_node_order(pg_data_t *pgdat, int *node_order, argument
5089 zonerefs = pgdat->node_zonelists[ZONELIST_FALLBACK]._zonerefs;
5106 static void build_thisnode_zonelists(pg_data_t *pgdat) argument
5111 zonerefs = pgdat->node_zonelists[ZONELIST_NOFALLBACK]._zonerefs;
5112 nr_zones = build_zonerefs_node(pgdat, zonerefs);
5125 static void build_zonelists(pg_data_t *pgdat) argument
5133 local_node = pgdat->node_id;
5151 build_zonelists_in_node_order(pgdat, node_order, nr_nodes);
5181 build_zonelists(pg_data_t *pgdat) argument
5278 pg_data_t *pgdat = NODE_DATA(nid); local
5334 build_all_zonelists(pg_data_t *pgdat) argument
5629 struct pglist_data *pgdat; local
5777 struct pglist_data *pgdat; local
5818 struct pglist_data *pgdat; local
6029 pg_data_t *pgdat; local
6057 pg_data_t *pgdat; local
6782 struct pglist_data *pgdat; local
[all...]
page-writeback.c:267 * @pgdat: the node
272 static unsigned long node_dirtyable_memory(struct pglist_data *pgdat) argument
278 struct zone *zone = pgdat->node_zones + z;
291 nr_pages -= min(nr_pages, pgdat->totalreserve_pages);
293 nr_pages += node_page_state(pgdat, NR_INACTIVE_FILE);
294 nr_pages += node_page_state(pgdat, NR_ACTIVE_FILE);
454 * @pgdat: the node
459 static unsigned long node_dirty_limit(struct pglist_data *pgdat) argument
461 unsigned long node_memory = node_dirtyable_memory(pgdat);
479 * @pgdat: the node to check
484 node_dirty_ok(struct pglist_data *pgdat) argument
[all...]
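node_dirtyable_memory() above sums the node's zone free pages, subtracts the unusable reserve, and adds the file LRU pages; node_dirty_limit() then scales the result by the dirty ratio. A toy version of the arithmetic, where the flat 20% is an assumption standing in for vm.dirty_ratio and the real limit's extra terms:

    /* Toy model: how many pages on a node may be dirty. */
    struct toy_node {
        unsigned long free_pages;     /* summed over the node's zones */
        unsigned long reserve_pages;  /* analogous to totalreserve_pages */
        unsigned long inactive_file;
        unsigned long active_file;
    };

    static unsigned long node_dirtyable_memory(const struct toy_node *n)
    {
        unsigned long nr = n->free_pages;

        /* Reserves are never dirtyable; clamp instead of underflowing. */
        nr -= (nr < n->reserve_pages) ? nr : n->reserve_pages;
        return nr + n->inactive_file + n->active_file;
    }

    static unsigned long node_dirty_limit(const struct toy_node *n)
    {
        return node_dirtyable_memory(n) * 20 / 100;  /* assumed ratio */
    }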
mm_init.c:48 pg_data_t *pgdat = NODE_DATA(nid); local
60 zonelist = &pgdat->node_zonelists[listid];
61 zone = &pgdat->node_zones[zoneid];
653 static inline void pgdat_set_deferred_range(pg_data_t *pgdat) argument
655 pgdat->first_deferred_pfn = ULONG_MAX;
708 pg_data_t *pgdat; local
714 pgdat = NODE_DATA(nid);
717 struct zone *zone = &pgdat->node_zones[zid];
725 static inline void pgdat_set_deferred_range(pg_data_t *pgdat) {} argument
1070 struct pglist_data *pgdat local
1250 reset_memoryless_node_totalpages(struct pglist_data *pgdat) argument
1268 calculate_node_totalpages(struct pglist_data *pgdat, unsigned long node_start_pfn, unsigned long node_end_pfn) argument
1332 pgdat_init_split_queue(struct pglist_data *pgdat) argument
1341 pgdat_init_split_queue(struct pglist_data *pgdat) argument
1345 pgdat_init_kcompactd(struct pglist_data *pgdat) argument
1350 pgdat_init_kcompactd(struct pglist_data *pgdat) argument
1353 pgdat_init_internals(struct pglist_data *pgdat) argument
1402 struct pglist_data *pgdat = zone->zone_pgdat; local
1502 free_area_init_core_hotplug(struct pglist_data *pgdat) argument
1554 free_area_init_core(struct pglist_data *pgdat) argument
1635 alloc_node_mem_map(struct pglist_data *pgdat) argument
1672 alloc_node_mem_map(struct pglist_data *pgdat) argument
1705 pg_data_t *pgdat = NODE_DATA(nid); local
1738 check_for_memory(pg_data_t *pgdat) argument
1868 pg_data_t *pgdat; local
2177 pg_data_t *pgdat = data; local
2270 pg_data_t *pgdat = zone->zone_pgdat; local
[all...]
migrate.c:2472 static bool migrate_balanced_pgdat(struct pglist_data *pgdat, argument
2477 for (z = pgdat->nr_zones - 1; z >= 0; z--) {
2478 struct zone *zone = pgdat->node_zones + z;
2511 static int numamigrate_isolate_folio(pg_data_t *pgdat, struct folio *folio) argument
2516 if (!migrate_balanced_pgdat(pgdat, nr_pages)) {
2521 for (z = pgdat->nr_zones - 1; z >= 0; z--) {
2522 if (managed_zone(pgdat->node_zones + z))
2533 wakeup_kswapd(pgdat->node_zones + z, 0,
2561 pg_data_t *pgdat = NODE_DATA(node); local
2586 isolated = numamigrate_isolate_folio(pgdat, folio);
[all...]
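migrate_balanced_pgdat() above accepts a NUMA-migration target only if some zone, scanned from highest to lowest, would still have free pages above its watermark after taking the incoming pages. A simplified standalone check; the watermark field stands in for the kernel's per-zone watermark computation:

    #include <stdbool.h>

    struct toy_zone {
        bool managed;
        unsigned long free_pages;
        unsigned long watermark;  /* stand-in for the zone's high watermark */
    };

    struct toy_node {
        int nr_zones;
        struct toy_zone zones[4];
    };

    /* Accept nr_pages of migration only if some zone stays above its
     * watermark afterwards, preferring high zones first. */
    static bool migrate_balanced(const struct toy_node *node, unsigned long nr_pages)
    {
        for (int z = node->nr_zones - 1; z >= 0; z--) {
            const struct toy_zone *zone = &node->zones[z];

            if (!zone->managed)
                continue;
            if (zone->free_pages > zone->watermark + nr_pages)
                return true;
        }
        return false;
    }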
memory_hotplug.c:499 static void update_pgdat_span(struct pglist_data *pgdat) argument
504 for (zone = pgdat->node_zones;
505 zone < pgdat->node_zones + MAX_NR_ZONES; zone++) {
523 pgdat->node_start_pfn = node_start_pfn;
524 pgdat->node_spanned_pages = node_end_pfn - node_start_pfn;
532 struct pglist_data *pgdat = zone->zone_pgdat; local
557 update_pgdat_span(pgdat);
716 static void __meminit resize_pgdat_range(struct pglist_data *pgdat, unsigned long start_pfn, argument
719 unsigned long old_end_pfn = pgdat_end_pfn(pgdat);
721 if (!pgdat->node_spanned_pages || start_pfn < pgdat->node_start_pfn)
754 struct pglist_data *pgdat = zone->zone_pgdat; local
849 pg_data_t *pgdat = NODE_DATA(nid); local
900 struct pglist_data *pgdat = NODE_DATA(nid); local
1239 struct pglist_data *pgdat; local
1274 pg_data_t *pgdat; local
1887 struct pglist_data *pgdat = zone->zone_pgdat; local
[all...]
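update_pgdat_span() above recomputes the node's pfn span after hot-remove as the union of its remaining non-empty zones; the same min/max reduction in a standalone form:

    struct toy_zone {
        unsigned long start_pfn;
        unsigned long spanned_pages;  /* 0 means the zone is empty */
    };

    struct toy_node {
        unsigned long start_pfn;
        unsigned long spanned_pages;
        struct toy_zone zones[4];
    };

    /* Recompute the node span as the union of its non-empty zones. */
    static void update_node_span(struct toy_node *node)
    {
        unsigned long start = ~0UL, end = 0;

        for (int z = 0; z < 4; z++) {
            const struct toy_zone *zone = &node->zones[z];

            if (!zone->spanned_pages)
                continue;
            if (zone->start_pfn < start)
                start = zone->start_pfn;
            if (zone->start_pfn + zone->spanned_pages > end)
                end = zone->start_pfn + zone->spanned_pages;
        }
        node->start_pfn = (end > start) ? start : 0;
        node->spanned_pages = (end > start) ? end - start : 0;
    }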
memory-tiers.c:235 pg_data_t *pgdat; local
237 pgdat = NODE_DATA(node);
238 if (!pgdat)
245 return rcu_dereference_check(pgdat->memtier,
253 pg_data_t *pgdat; local
256 pgdat = NODE_DATA(node);
257 if (!pgdat)
261 memtier = rcu_dereference(pgdat->memtier);
275 void node_get_allowed_targets(pg_data_t *pgdat, nodemask_t *targets) argument
285 memtier = rcu_dereference(pgdat->memtier);
509 pg_data_t *pgdat = NODE_DATA(node); local
536 pg_data_t *pgdat; local
[all...]
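The memory-tiers.c matches read pgdat->memtier under RCU. Outside the kernel, the closest analogue of that read side is an acquire-load of a published pointer, sketched here with C11 atomics; this models only the lockless read, not RCU's grace-period guarantee that the old tier stays valid while readers hold it:

    #include <stdatomic.h>

    struct memory_tier {
        int adistance;  /* abstract distance, as in the kernel's tiers */
    };

    struct toy_node {
        _Atomic(struct memory_tier *) memtier;  /* published by tiering code */
    };

    /* Read side: one acquire-load, then use the snapshot; the pointer
     * may be replaced concurrently but is never freed under a real
     * RCU reader. */
    static int node_tier_adistance(struct toy_node *node)
    {
        struct memory_tier *tier =
            atomic_load_explicit(&node->memtier, memory_order_acquire);

        return tier ? tier->adistance : -1;
    }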
memcontrol.c:904 pg_data_t *pgdat = folio_pgdat(folio); local
912 __mod_node_page_state(pgdat, idx, val);
916 lruvec = mem_cgroup_lruvec(memcg, pgdat);
924 pg_data_t *pgdat = page_pgdat(virt_to_page(p)); local
938 __mod_node_page_state(pgdat, idx, val);
940 lruvec = mem_cgroup_lruvec(memcg, pgdat);
1176 mz = root->nodeinfo[reclaim->pgdat->node_id];
1825 pg_data_t *pgdat,
1835 .pgdat = pgdat,
1824 mem_cgroup_soft_reclaim(struct mem_cgroup *root_memcg, pg_data_t *pgdat, gfp_t gfp_mask, unsigned long *total_scanned) argument
2993 mod_objcg_mlstate(struct obj_cgroup *objcg, struct pglist_data *pgdat, enum node_stat_item idx, int nr) argument
3359 mod_objcg_state(struct obj_cgroup *objcg, struct pglist_data *pgdat, enum node_stat_item idx, int nr) argument
3724 mem_cgroup_soft_limit_reclaim(pg_data_t *pgdat, int order, gfp_t gfp_mask, unsigned long *total_scanned) argument
4359 pg_data_t *pgdat; local
5988 struct pglist_data *pgdat; local
[all...]
compaction.c:453 void reset_isolation_suitable(pg_data_t *pgdat) argument
458 struct zone *zone = &pgdat->node_zones[zoneid];
817 pg_data_t *pgdat = cc->zone->zone_pgdat; local
822 inactive = node_page_state(pgdat, NR_INACTIVE_FILE) +
823 node_page_state(pgdat, NR_INACTIVE_ANON);
824 active = node_page_state(pgdat, NR_ACTIVE_FILE) +
825 node_page_state(pgdat, NR_ACTIVE_ANON);
826 isolated = node_page_state(pgdat, NR_ISOLATED_FILE) +
827 node_page_state(pgdat, NR_ISOLATED_ANON);
842 wake_throttle_isolated(pgdat);
894 pg_data_t *pgdat = cc->zone->zone_pgdat; local
2203 kswapd_is_running(pg_data_t *pgdat) argument
2248 fragmentation_score_node(pg_data_t *pgdat) argument
2278 should_proactive_compact_node(pg_data_t *pgdat) argument
2317 pg_data_t *pgdat; local
2900 compact_node(pg_data_t *pgdat, bool proactive) argument
2964 pg_data_t *pgdat = NODE_DATA(nid); local
3030 kcompactd_work_requested(pg_data_t *pgdat) argument
3036 kcompactd_node_suitable(pg_data_t *pgdat) argument
3059 kcompactd_do_work(pg_data_t *pgdat) argument
3137 wakeup_kcompactd(pg_data_t *pgdat, int order, int highest_zoneidx) argument
3169 pg_data_t *pgdat = (pg_data_t *)p; local
3244 pg_data_t *pgdat = NODE_DATA(nid); local
3281 pg_data_t *pgdat = NODE_DATA(nid); local
[all...]
/linux-master/include/linux/
node.h:131 struct pglist_data *pgdat = NODE_DATA(nid); local
132 unsigned long start_pfn = pgdat->node_start_pfn;
133 unsigned long end_pfn = start_pfn + pgdat->node_spanned_pages;
writeback.h:339 bool node_dirty_ok(struct pglist_data *pgdat);
swap.h:403 pg_data_t *pgdat,
page_owner.h:20 pg_data_t *pgdat, struct zone *zone);
mmzone.h:555 void lru_gen_init_pgdat(struct pglist_data *pgdat);
568 static inline void lru_gen_init_pgdat(struct pglist_data *pgdat) argument
632 struct pglist_data *pgdat; member in struct:lruvec
1301 * Also synchronizes pgdat->first_deferred_pfn during deferred page
1423 static inline unsigned long pgdat_end_pfn(pg_data_t *pgdat) argument
1425 return pgdat->node_start_pfn + pgdat->node_spanned_pages;
1430 void build_all_zonelists(pg_data_t *pgdat);
1458 return lruvec->pgdat;
1572 extern struct pglist_data *next_online_pgdat(struct pglist_data *pgdat);
[all...]
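mmzone.h above supplies the node span helpers, pgdat_end_pfn() being simply node_start_pfn + node_spanned_pages, plus next_online_pgdat() behind the for_each_online_pgdat() iterator. A toy walk over an array of nodes using the same end-pfn identity:

    #include <stdio.h>

    struct toy_node {
        int nid;
        int online;
        unsigned long node_start_pfn;
        unsigned long node_spanned_pages;
    };

    /* Same identity as pgdat_end_pfn(): one past the last pfn. */
    static unsigned long node_end_pfn(const struct toy_node *n)
    {
        return n->node_start_pfn + n->node_spanned_pages;
    }

    int main(void)
    {
        struct toy_node nodes[] = {
            { 0, 1, 0x0,      0x80000 },
            { 1, 0, 0x80000,  0x80000 },  /* offline: skipped, like the iterator */
            { 2, 1, 0x100000, 0x40000 },
        };

        for (unsigned i = 0; i < sizeof(nodes) / sizeof(nodes[0]); i++) {
            if (!nodes[i].online)
                continue;
            printf("node %d: pfns [%#lx, %#lx)\n", nodes[i].nid,
                   nodes[i].node_start_pfn, node_end_pfn(&nodes[i]));
        }
        return 0;
    }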
memcontrol.h:59 pg_data_t *pgdat; member in struct:mem_cgroup_reclaim_cookie
731 * @pgdat: pglist_data
734 * @pgdat combination. This can be the node lruvec, if the memory
738 struct pglist_data *pgdat)
744 lruvec = &pgdat->__lruvec;
751 mz = memcg->nodeinfo[pgdat->node_id];
756 * we have to be prepared to initialize lruvec->pgdat here;
759 if (unlikely(lruvec->pgdat != pgdat))
760 lruvec->pgdat = pgdat;
737 mem_cgroup_lruvec(struct mem_cgroup *memcg, struct pglist_data *pgdat) argument
1314 mem_cgroup_lruvec(struct mem_cgroup *memcg, struct pglist_data *pgdat) argument
1322 struct pglist_data *pgdat = folio_pgdat(folio); local
1378 struct pglist_data *pgdat = folio_pgdat(folio); local
1386 struct pglist_data *pgdat = folio_pgdat(folio); local
1395 struct pglist_data *pgdat = folio_pgdat(folio); local
1627 mem_cgroup_soft_limit_reclaim(pg_data_t *pgdat, int order, gfp_t gfp_mask, unsigned long *total_scanned) argument
[all...]
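The kernel-doc and inline comment above describe mem_cgroup_lruvec(): with no memcg it returns the node-wide lruvec, otherwise the per-memcg, per-node one, stamping lruvec->pgdat lazily because per-memcg lruvecs start out zeroed. A condensed model with toy types:

    struct toy_node;

    struct lruvec {
        struct toy_node *pgdat;   /* lazily bound for per-memcg lruvecs */
    };

    struct toy_node {
        int nid;
        struct lruvec __lruvec;   /* used when there is no memcg */
    };

    struct memcg {
        struct lruvec per_node[8];  /* one per node, zero-initialized */
    };

    static struct lruvec *memcg_lruvec(struct memcg *memcg, struct toy_node *pgdat)
    {
        struct lruvec *lruvec;

        if (!memcg) {                 /* no cgroup: the node-wide lruvec */
            lruvec = &pgdat->__lruvec;
        } else {
            lruvec = &memcg->per_node[pgdat->nid];
            /* Zeroed at memcg creation, so bind the node here. */
            if (lruvec->pgdat != pgdat)
                lruvec->pgdat = pgdat;
        }
        return lruvec;
    }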
memory_hotplug.h:21 * For supporting node-hotadd, we have to allocate a new pgdat.
30 extern void arch_refresh_nodedata(int nid, pg_data_t *pgdat);
39 * Because, pgdat for the new node is not allocated/initialized yet itself.
44 memblock_alloc(sizeof(*pgdat), SMP_CACHE_BYTES); \
48 static inline void arch_refresh_nodedata(int nid, pg_data_t *pgdat) argument
50 node_data[nid] = pgdat;
61 static inline void arch_refresh_nodedata(int nid, pg_data_t *pgdat) argument
230 static inline void pgdat_kswapd_lock(pg_data_t *pgdat) argument
232 mutex_lock(&pgdat->kswapd_lock);
235 static inline void pgdat_kswapd_unlock(pg_data_t *pgdat) argument
240 pgdat_kswapd_lock_init(pg_data_t *pgdat) argument
287 pgdat_kswapd_lock(pg_data_t *pgdat) argument
288 pgdat_kswapd_unlock(pg_data_t *pgdat) argument
289 pgdat_kswapd_lock_init(pg_data_t *pgdat) argument
304 pgdat_resize_lock(struct pglist_data *pgdat, unsigned long *flags) argument
309 pgdat_resize_unlock(struct pglist_data *pgdat, unsigned long *flags) argument
314 pgdat_resize_init(struct pglist_data *pgdat) argument
324 pgdat_resize_init(struct pglist_data *pgdat) argument
[all...]
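The comments above note that a hot-added node's pgdat cannot come from the new node's own memory, which is not initialized yet, so the generic path allocates it elsewhere and then publishes it via arch_refresh_nodedata(). A toy version of that allocate-then-publish sequence, where calloc stands in for memblock_alloc():

    #include <stdlib.h>

    #define MAX_NUMNODES 8

    struct toy_node { int nid; };

    static struct toy_node *node_data[MAX_NUMNODES];

    /* Allocate the new node's descriptor from memory we already have
     * (any online node), then publish it; only afterwards can the new
     * node's own memory be onlined and used. */
    static struct toy_node *hotadd_node(int nid)
    {
        struct toy_node *pgdat = calloc(1, sizeof(*pgdat));

        if (!pgdat)
            return NULL;
        pgdat->nid = nid;
        node_data[nid] = pgdat;  /* the arch_refresh_nodedata() step */
        return pgdat;
    }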
/linux-master/drivers/base/
node.c:375 struct pglist_data *pgdat = NODE_DATA(nid); local
381 sreclaimable = node_page_state_pages(pgdat, NR_SLAB_RECLAIMABLE_B);
382 sunreclaimable = node_page_state_pages(pgdat, NR_SLAB_UNRECLAIMABLE_B);
384 swapcached = node_page_state_pages(pgdat, NR_SWAPCACHE);
403 nid, K(node_page_state(pgdat, NR_ACTIVE_ANON) +
404 node_page_state(pgdat, NR_ACTIVE_FILE)),
405 nid, K(node_page_state(pgdat, NR_INACTIVE_ANON) +
406 node_page_state(pgdat, NR_INACTIVE_FILE)),
407 nid, K(node_page_state(pgdat, NR_ACTIVE_ANON)),
408 nid, K(node_page_state(pgdat, NR_INACTIVE_ANON)),
520 struct pglist_data *pgdat = NODE_DATA(nid); local
[all...]
/linux-master/drivers/gpu/drm/amd/amdkfd/
kfd_crat.c:1753 pg_data_t *pgdat; local
1773 pgdat = NODE_DATA(numa_node_id);
1775 mem_in_bytes += zone_managed_pages(&pgdat->node_zones[zone_type]);

