/*
 * Copyright (c) 2000, 2003 Silicon Graphics, Inc.  All rights reserved.
 * Copyright (c) 2001 Intel Corp.
 * Copyright (c) 2001 Tony Luck <tony.luck@intel.com>
 * Copyright (c) 2002 NEC Corp.
 * Copyright (c) 2002 Kimio Suganuma <k-suganuma@da.jp.nec.com>
 * Copyright (c) 2004 Silicon Graphics, Inc
 *	Russ Anderson <rja@sgi.com>
 *	Jesse Barnes <jbarnes@sgi.com>
 *	Jack Steiner <steiner@sgi.com>
 */

/*
 * Platform initialization for Discontig Memory
 */

#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/swap.h>
#include <linux/bootmem.h>
#include <linux/acpi.h>
#include <linux/efi.h>
#include <linux/nodemask.h>
#include <asm/pgalloc.h>
#include <asm/tlb.h>
#include <asm/meminit.h>
#include <asm/numa.h>
#include <asm/sections.h>

/*
 * Track per-node information needed to setup the boot memory allocator, the
 * per-node areas, and the real VM.
 */
struct early_node_data {
	struct ia64_node_data *node_data;
	unsigned long pernode_addr;
	unsigned long pernode_size;
	struct bootmem_data bootmem_data;
	unsigned long num_physpages;
#ifdef CONFIG_ZONE_DMA
	unsigned long num_dma_physpages;
#endif
	unsigned long min_pfn;
	unsigned long max_pfn;
};

static struct early_node_data mem_data[MAX_NUMNODES] __initdata;
static nodemask_t memory_less_mask __initdata;

static pg_data_t *pgdat_list[MAX_NUMNODES];

/*
 * To prevent cache aliasing effects, align per-node structures so that they
 * start at addresses that are strided by node number.
 */
#define MAX_NODE_ALIGN_OFFSET	(32 * 1024 * 1024)
#define NODEDATA_ALIGN(addr, node)						\
	((((addr) + 1024*1024-1) & ~(1024*1024-1)) +				\
	     (((node)*PERCPU_PAGE_SIZE) & (MAX_NODE_ALIGN_OFFSET - 1)))
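/*
 * NODEDATA_ALIGN() rounds addr up to a 1MB boundary and then adds
 * node * PERCPU_PAGE_SIZE (e.g. 64KB per node if PERCPU_PAGE_SHIFT is 16),
 * wrapping modulo MAX_NODE_ALIGN_OFFSET, so the pernode areas of different
 * nodes start at different cache indexes.
 */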

/**
 * build_node_maps - callback to setup bootmem structs for each node
 * @start: physical start of range
 * @len: length of range
 * @node: node where this range resides
 *
 * We allocate a struct bootmem_data for each piece of memory that we wish to
 * treat as a virtually contiguous block (i.e. each node). Each such block
 * must start on an %IA64_GRANULE_SIZE boundary, so we round the address down
 * if necessary.  Any non-existent pages will simply be part of the virtual
 * memmap.  min_low_pfn and max_low_pfn are updated separately by
 * find_max_min_low_pfn() when the EFI memory map is walked.
 */
static int __init build_node_maps(unsigned long start, unsigned long len,
				  int node)
{
	unsigned long cstart, epfn, end = start + len;
	struct bootmem_data *bdp = &mem_data[node].bootmem_data;

	epfn = GRANULEROUNDUP(end) >> PAGE_SHIFT;
	cstart = GRANULEROUNDDOWN(start);

	if (!bdp->node_low_pfn) {
		bdp->node_boot_start = cstart;
		bdp->node_low_pfn = epfn;
	} else {
		bdp->node_boot_start = min(cstart, bdp->node_boot_start);
		bdp->node_low_pfn = max(epfn, bdp->node_low_pfn);
	}

	return 0;
}

/**
 * early_nr_cpus_node - return number of cpus on a given node
 * @node: node to check
 *
 * Count the number of cpus on @node.  We can't use nr_cpus_node() yet because
 * acpi_boot_init() (which builds the node_to_cpu_mask array) hasn't been
 * called yet.  Note that node 0 will also count all non-existent cpus.
 */
static int __meminit early_nr_cpus_node(int node)
{
	int cpu, n = 0;

	for (cpu = 0; cpu < NR_CPUS; cpu++)
		if (node == node_cpuid[cpu].nid)
			n++;

	return n;
}

/**
 * compute_pernodesize - compute size of pernode data
 * @node: the node id.
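 *
 * The sizes summed here mirror the layout set up in fill_pernode(), so the
 * two need to stay in sync.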
 */
static unsigned long __meminit compute_pernodesize(int node)
{
	unsigned long pernodesize = 0, cpus;

	cpus = early_nr_cpus_node(node);
	pernodesize += PERCPU_PAGE_SIZE * cpus;
	pernodesize += node * L1_CACHE_BYTES;
	pernodesize += L1_CACHE_ALIGN(sizeof(pg_data_t));
	pernodesize += L1_CACHE_ALIGN(sizeof(struct ia64_node_data));
	pernodesize = PAGE_ALIGN(pernodesize);
	return pernodesize;
}

/**
 * per_cpu_node_setup - setup per-cpu areas on each node
 * @cpu_data: per-cpu area on this node
 * @node: node to setup
 *
 * Copy the static per-cpu data into the region we just set aside and then
 * setup __per_cpu_offset for each CPU on this node.  Return a pointer to
 * the end of the area.
 */
static void *per_cpu_node_setup(void *cpu_data, int node)
{
#ifdef CONFIG_SMP
	int cpu;

	for (cpu = 0; cpu < NR_CPUS; cpu++) {
		if (node == node_cpuid[cpu].nid) {
			memcpy(__va(cpu_data), __phys_per_cpu_start,
			       __per_cpu_end - __per_cpu_start);
			__per_cpu_offset[cpu] = (char*)__va(cpu_data) -
				__per_cpu_start;
			cpu_data += PERCPU_PAGE_SIZE;
		}
	}
#endif
	return cpu_data;
}

/**
 * fill_pernode - initialize pernode data.
 * @node: the node id.
 * @pernode: physical address of pernode data
 * @pernodesize: size of the pernode data
 */
static void __init fill_pernode(int node, unsigned long pernode,
	unsigned long pernodesize)
{
	void *cpu_data;
	int cpus = early_nr_cpus_node(node);
	struct bootmem_data *bdp = &mem_data[node].bootmem_data;

	mem_data[node].pernode_addr = pernode;
	mem_data[node].pernode_size = pernodesize;
	memset(__va(pernode), 0, pernodesize);

	cpu_data = (void *)pernode;
	pernode += PERCPU_PAGE_SIZE * cpus;
	pernode += node * L1_CACHE_BYTES;

	pgdat_list[node] = __va(pernode);
	pernode += L1_CACHE_ALIGN(sizeof(pg_data_t));

	mem_data[node].node_data = __va(pernode);
	pernode += L1_CACHE_ALIGN(sizeof(struct ia64_node_data));

	pgdat_list[node]->bdata = bdp;
	pernode += L1_CACHE_ALIGN(sizeof(pg_data_t));

	cpu_data = per_cpu_node_setup(cpu_data, node);

	return;
}
/**
 * find_pernode_space - allocate memory for memory map and per-node structures
 * @start: physical start of range
 * @len: length of range
 * @node: node where this range resides
 *
 * This routine reserves space for the per-cpu data struct, the list of
 * pg_data_ts and the per-node data struct.  Each node will have something like
 * the following in the first chunk of addr. space large enough to hold it.
 *
 *    ________________________
 *   |                        |
 *   |~~~~~~~~~~~~~~~~~~~~~~~~| <-- NODEDATA_ALIGN(start, node) for the first
 *   |    PERCPU_PAGE_SIZE *  |     start and length big enough
 *   |    cpus_on_this_node   | Node 0 will also have entries for all non-existent cpus.
 *   |------------------------|
 *   |   local pg_data_t *    |
 *   |------------------------|
 *   |  local ia64_node_data  |
 *   |------------------------|
 *   |          ???           |
 *   |________________________|
 *
 * Once this space has been set aside, the bootmem maps are initialized.  We
 * could probably move the allocation of the per-cpu and ia64_node_data space
 * outside of this function and use alloc_bootmem_node(), but doing it here
 * is straightforward and we get the alignments we want so...
 */
static int __init find_pernode_space(unsigned long start, unsigned long len,
				     int node)
{
	unsigned long epfn;
	unsigned long pernodesize = 0, pernode, pages, mapsize;
	struct bootmem_data *bdp = &mem_data[node].bootmem_data;

	epfn = (start + len) >> PAGE_SHIFT;

	pages = bdp->node_low_pfn - (bdp->node_boot_start >> PAGE_SHIFT);
	mapsize = bootmem_bootmap_pages(pages) << PAGE_SHIFT;

	/*
	 * Make sure this memory falls within this node's usable memory
	 * since we may have thrown some away in build_node_maps().
	 */
	if (start < bdp->node_boot_start || epfn > bdp->node_low_pfn)
		return 0;

	/* Don't setup this node's local space twice... */
	if (mem_data[node].pernode_addr)
		return 0;

	/*
	 * Calculate total size needed, incl. what's necessary
	 * for good alignment and alias prevention.
	 */
	pernodesize = compute_pernodesize(node);
	pernode = NODEDATA_ALIGN(start, node);

	/* Is this range big enough for what we want to store here? */
	if (start + len > (pernode + pernodesize + mapsize))
		fill_pernode(node, pernode, pernodesize);

	return 0;
}

/**
 * free_node_bootmem - free bootmem allocator memory for use
 * @start: physical start of range
 * @len: length of range
 * @node: node where this range resides
 *
 * Simply calls the bootmem allocator to free the specified range from
 * the given pg_data_t's bdata struct.  After this function has been called
 * for all the entries in the EFI memory map, the bootmem allocator will
 * be ready to service allocation requests.
 */
static int __init free_node_bootmem(unsigned long start, unsigned long len,
				    int node)
{
	free_bootmem_node(pgdat_list[node], start, len);

	return 0;
}

/**
 * reserve_pernode_space - reserve memory for per-node space
 *
 * Reserve the space used by the bootmem maps & per-node space in the boot
 * allocator so that when we actually create the real mem maps we don't
 * use their memory.
 */
static void __init reserve_pernode_space(void)
{
	unsigned long base, size, pages;
	struct bootmem_data *bdp;
	int node;

	for_each_online_node(node) {
		pg_data_t *pdp = pgdat_list[node];

		if (node_isset(node, memory_less_mask))
			continue;

		bdp = pdp->bdata;

		/* First the bootmem_map itself */
		pages = bdp->node_low_pfn - (bdp->node_boot_start>>PAGE_SHIFT);
		size = bootmem_bootmap_pages(pages) << PAGE_SHIFT;
		base = __pa(bdp->node_bootmem_map);
		reserve_bootmem_node(pdp, base, size);

		/* Now the per-node space */
		size = mem_data[node].pernode_size;
		base = __pa(mem_data[node].pernode_addr);
		reserve_bootmem_node(pdp, base, size);
	}
}

static void __meminit scatter_node_data(void)
{
	pg_data_t **dst;
	int node;

	/*
	 * for_each_online_node() can't be used here.
	 * node_online_map is not set for hot-added nodes at this time,
	 * because we are halfway through initialization of the new node's
	 * structures.  If for_each_online_node() were used, a new node's
	 * pg_data_ptrs would not be initialized.  Check pgdat_list[]
	 * instead.
	 */
	for_each_node(node) {
		if (pgdat_list[node]) {
			dst = LOCAL_DATA_ADDR(pgdat_list[node])->pg_data_ptrs;
			memcpy(dst, pgdat_list, sizeof(pgdat_list));
		}
	}
}

/**
 * initialize_pernode_data - fixup per-cpu & per-node pointers
 *
 * Each node's per-node area has a copy of the global pg_data_t list, so
 * we copy that to each node here, as well as setting the per-cpu pointer
 * to the local node data structure.  The active_cpus field of the per-node
 * structure gets set up by the platform_cpu_init() function later.
 */
static void __init initialize_pernode_data(void)
{
	int cpu, node;

	scatter_node_data();

#ifdef CONFIG_SMP
	/* Set the node_data pointer for each per-cpu struct */
	for (cpu = 0; cpu < NR_CPUS; cpu++) {
		node = node_cpuid[cpu].nid;
		per_cpu(cpu_info, cpu).node_data = mem_data[node].node_data;
	}
#else
	{
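		/*
		 * With !CONFIG_SMP there is only cpu 0, and its cpuinfo_ia64
		 * still lives in the kernel's static per-cpu image, so
		 * compute its address there and patch node_data directly.
		 */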
		struct cpuinfo_ia64 *cpu0_cpu_info;
		cpu = 0;
		node = node_cpuid[cpu].nid;
		cpu0_cpu_info = (struct cpuinfo_ia64 *)(__phys_per_cpu_start +
			((char *)&per_cpu__cpu_info - __per_cpu_start));
		cpu0_cpu_info->node_data = mem_data[node].node_data;
	}
#endif /* CONFIG_SMP */
}

/**
 * memory_less_node_alloc - attempt to allocate pernode memory on the best
 *	node as given by the NUMA SLIT distances, falling back to any other
 *	node if __alloc_bootmem_node() fails for the best node.
 * @nid: node id
 * @pernodesize: size of this node's pernode data
 */
static void __init *memory_less_node_alloc(int nid, unsigned long pernodesize)
{
	void *ptr = NULL;
	u8 best = 0xff;
	int bestnode = -1, node, anynode = 0;

	for_each_online_node(node) {
		if (node_isset(node, memory_less_mask))
			continue;
		else if (node_distance(nid, node) < best) {
			best = node_distance(nid, node);
			bestnode = node;
		}
		anynode = node;
	}

	if (bestnode == -1)
		bestnode = anynode;

	ptr = __alloc_bootmem_node(pgdat_list[bestnode], pernodesize,
		PERCPU_PAGE_SIZE, __pa(MAX_DMA_ADDRESS));

	return ptr;
}

/**
 * memory_less_nodes - allocate and initialize pernode information for
 *	CPU-only (memoryless) nodes.
 */
static void __init memory_less_nodes(void)
{
	unsigned long pernodesize;
	void *pernode;
	int node;

	for_each_node_mask(node, memory_less_mask) {
		pernodesize = compute_pernodesize(node);
		pernode = memory_less_node_alloc(node, pernodesize);
		fill_pernode(node, __pa(pernode), pernodesize);
	}

	return;
}

/**
 * find_memory - walk the EFI memory map and setup the bootmem allocator
 *
 * Called early in boot to setup the bootmem allocator, and to
 * allocate the per-cpu and per-node structures.
 */
void __init find_memory(void)
{
	int node;

	reserve_memory();

	if (num_online_nodes() == 0) {
		printk(KERN_ERR "node info missing!\n");
		node_set_online(0);
	}

	nodes_or(memory_less_mask, memory_less_mask, node_online_map);
	min_low_pfn = -1;
	max_low_pfn = 0;

	/* These actually end up getting called by call_pernode_memory() */
	efi_memmap_walk(filter_rsvd_memory, build_node_maps);
	efi_memmap_walk(filter_rsvd_memory, find_pernode_space);
	efi_memmap_walk(find_max_min_low_pfn, NULL);

	for_each_online_node(node)
		if (mem_data[node].bootmem_data.node_low_pfn) {
			node_clear(node, memory_less_mask);
			mem_data[node].min_pfn = ~0UL;
		}

	efi_memmap_walk(register_active_ranges, NULL);

	/*
	 * Initialize the boot memory maps in reverse order since that's
	 * what the bootmem allocator expects
	 */
	for (node = MAX_NUMNODES - 1; node >= 0; node--) {
		unsigned long pernode, pernodesize, map;
		struct bootmem_data *bdp;

		if (!node_online(node))
			continue;
		else if (node_isset(node, memory_less_mask))
			continue;

		bdp = &mem_data[node].bootmem_data;
		pernode = mem_data[node].pernode_addr;
		pernodesize = mem_data[node].pernode_size;
		map = pernode + pernodesize;

		init_bootmem_node(pgdat_list[node],
				  map>>PAGE_SHIFT,
				  bdp->node_boot_start>>PAGE_SHIFT,
				  bdp->node_low_pfn);
	}

	efi_memmap_walk(filter_rsvd_memory, free_node_bootmem);

	reserve_pernode_space();
	memory_less_nodes();
	initialize_pernode_data();

	max_pfn = max_low_pfn;

	find_initrd();
}

#ifdef CONFIG_SMP
/**
 * per_cpu_init - setup per-cpu variables
 *
 * find_pernode_space() does most of this already; we just need to set
 * local_per_cpu_offset
 */
void __cpuinit *per_cpu_init(void)
{
	int cpu;
	static int first_time = 1;

	if (smp_processor_id() != 0)
		return __per_cpu_start + __per_cpu_offset[smp_processor_id()];

	if (first_time) {
		first_time = 0;
		for (cpu = 0; cpu < NR_CPUS; cpu++)
			per_cpu(local_per_cpu_offset, cpu) = __per_cpu_offset[cpu];
	}

	return __per_cpu_start + __per_cpu_offset[smp_processor_id()];
}
#endif /* CONFIG_SMP */

/**
 * show_mem - give short summary of memory stats
 *
 * Shows a simple page count of reserved and used pages in the system.
 * For discontig machines, it does this on a per-pgdat basis.
 */
void show_mem(void)
{
	int i, total_reserved = 0;
	int total_shared = 0, total_cached = 0;
	unsigned long total_present = 0;
	pg_data_t *pgdat;

	printk(KERN_INFO "Mem-info:\n");
	show_free_areas();
	printk(KERN_INFO "Free swap:       %6ldkB\n",
	       nr_swap_pages<<(PAGE_SHIFT-10));
	printk(KERN_INFO "Node memory in pages:\n");
	for_each_online_pgdat(pgdat) {
		unsigned long present;
		unsigned long flags;
		int shared = 0, cached = 0, reserved = 0;

		pgdat_resize_lock(pgdat, &flags);
		present = pgdat->node_present_pages;
		for (i = 0; i < pgdat->node_spanned_pages; i++) {
			struct page *page;
			if (pfn_valid(pgdat->node_start_pfn + i))
				page = pfn_to_page(pgdat->node_start_pfn + i);
			else {
				i = vmemmap_find_next_valid_pfn(pgdat->node_id,
					 i) - 1;
				continue;
			}
			if (PageReserved(page))
				reserved++;
			else if (PageSwapCache(page))
				cached++;
			else if (page_count(page))
				shared += page_count(page)-1;
		}
		pgdat_resize_unlock(pgdat, &flags);
		total_present += present;
		total_reserved += reserved;
		total_cached += cached;
		total_shared += shared;
		printk(KERN_INFO "Node %4d:  RAM: %11ld, rsvd: %8d, "
		       "shrd: %10d, swpd: %10d\n", pgdat->node_id,
		       present, reserved, shared, cached);
	}
	printk(KERN_INFO "%ld pages of RAM\n", total_present);
	printk(KERN_INFO "%d reserved pages\n", total_reserved);
	printk(KERN_INFO "%d pages shared\n", total_shared);
	printk(KERN_INFO "%d pages swap cached\n", total_cached);
	printk(KERN_INFO "Total of %ld pages in page table cache\n",
	       quicklist_total_size());
	printk(KERN_INFO "%d free buffer pages\n", nr_free_buffer_pages());
}

/**
 * call_pernode_memory - use SRAT to call callback functions with node info
 * @start: physical start of range
 * @len: length of range
 * @arg: function to call for each range
 *
 * efi_memmap_walk() knows nothing about layout of memory across nodes. Find
 * out to which node a block of memory belongs.  Ignore memory that we cannot
 * identify, and split blocks that run across multiple nodes.
 *
 * Take this opportunity to round the start address up and the end address
 * down to page boundaries.
 */
void call_pernode_memory(unsigned long start, unsigned long len, void *arg)
{
	unsigned long rs, re, end = start + len;
	void (*func)(unsigned long, unsigned long, int);
	int i;

	start = PAGE_ALIGN(start);
	end &= PAGE_MASK;
	if (start >= end)
		return;

	func = arg;

	if (!num_node_memblks) {
		/* No SRAT table, so assume one node (node 0) */
		if (start < end)
			(*func)(start, end - start, 0);
		return;
	}

	for (i = 0; i < num_node_memblks; i++) {
		rs = max(start, node_memblk[i].start_paddr);
		re = min(end, node_memblk[i].start_paddr +
			 node_memblk[i].size);

		if (rs < re)
			(*func)(rs, re - rs, node_memblk[i].nid);

		if (re == end)
			break;
	}
}

/**
 * count_node_pages - callback to build per-node memory info structures
 * @start: physical start of range
 * @len: length of range
 * @node: node where this range resides
 *
 * Each node has its own number of physical pages, DMAable pages, start, and
 * end page frame number.  This routine will be called by call_pernode_memory()
 * for each piece of usable memory and will setup these values for each node.
 * Very similar to build_node_maps().
 */
static __init int count_node_pages(unsigned long start, unsigned long len, int node)
{
	unsigned long end = start + len;

	mem_data[node].num_physpages += len >> PAGE_SHIFT;
#ifdef CONFIG_ZONE_DMA
	if (start <= __pa(MAX_DMA_ADDRESS))
		mem_data[node].num_dma_physpages +=
			(min(end, __pa(MAX_DMA_ADDRESS)) - start) >> PAGE_SHIFT;
#endif
	start = GRANULEROUNDDOWN(start);
	start = ORDERROUNDDOWN(start);
	end = GRANULEROUNDUP(end);
	mem_data[node].max_pfn = max(mem_data[node].max_pfn,
				     end >> PAGE_SHIFT);
	mem_data[node].min_pfn = min(mem_data[node].min_pfn,
				     start >> PAGE_SHIFT);

	return 0;
}

/**
 * paging_init - setup page tables
 *
 * paging_init() sets up the page tables for each node of the system and frees
 * the bootmem allocator memory for general use.
 */
void __init paging_init(void)
{
	unsigned long max_dma;
	unsigned long pfn_offset = 0;
	unsigned long max_pfn = 0;
	int node;
	unsigned long max_zone_pfns[MAX_NR_ZONES];

	max_dma = virt_to_phys((void *) MAX_DMA_ADDRESS) >> PAGE_SHIFT;

	efi_memmap_walk(filter_rsvd_memory, count_node_pages);

	sparse_memory_present_with_active_regions(MAX_NUMNODES);
	sparse_init();

#ifdef CONFIG_VIRTUAL_MEM_MAP
	vmalloc_end -= PAGE_ALIGN(ALIGN(max_low_pfn, MAX_ORDER_NR_PAGES) *
		sizeof(struct page));
	vmem_map = (struct page *) vmalloc_end;
	efi_memmap_walk(create_mem_map_page_table, NULL);
	printk(KERN_INFO "Virtual mem_map starts at 0x%p\n", vmem_map);
#endif

	for_each_online_node(node) {
		num_physpages += mem_data[node].num_physpages;
		pfn_offset = mem_data[node].min_pfn;

#ifdef CONFIG_VIRTUAL_MEM_MAP
		NODE_DATA(node)->node_mem_map = vmem_map + pfn_offset;
#endif
		if (mem_data[node].max_pfn > max_pfn)
			max_pfn = mem_data[node].max_pfn;
	}

	memset(max_zone_pfns, 0, sizeof(max_zone_pfns));
#ifdef CONFIG_ZONE_DMA
	max_zone_pfns[ZONE_DMA] = max_dma;
#endif
	max_zone_pfns[ZONE_NORMAL] = max_pfn;
	free_area_init_nodes(max_zone_pfns);

	zero_page_memmap_ptr = virt_to_page(ia64_imva(empty_zero_page));
}

#ifdef CONFIG_MEMORY_HOTPLUG
pg_data_t *arch_alloc_nodedata(int nid)
{
	unsigned long size = compute_pernodesize(nid);

	return kzalloc(size, GFP_KERNEL);
}

void arch_free_nodedata(pg_data_t *pgdat)
{
	kfree(pgdat);
}

void arch_refresh_nodedata(int update_node, pg_data_t *update_pgdat)
{
	pgdat_list[update_node] = update_pgdat;
	scatter_node_data();
}
#endif