/*
 * Some of the code in this file has been gleaned from the 64 bit
 * discontigmem support code base.
 *
 * Copyright (C) 2002, IBM Corp.
 *
 * All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
 * NON INFRINGEMENT.  See the GNU General Public License for more
 * details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 *
 * Send feedback to Pat Gaughen <gone@us.ibm.com>
 */
#include <linux/mm.h>
#include <linux/bootmem.h>
#include <linux/mmzone.h>
#include <linux/acpi.h>
#include <linux/nodemask.h>
#include <asm/srat.h>
#include <asm/topology.h>
#include <asm/smp.h>
#include <asm/e820.h>

/*
 * proximity macros and definitions
 */
#define NODE_ARRAY_INDEX(x)	((x) / 8)	/* 8 bits/char */
#define NODE_ARRAY_OFFSET(x)	((x) % 8)	/* 8 bits/char */
#define BMAP_SET(bmap, bit)	((bmap)[NODE_ARRAY_INDEX(bit)] |= 1 << NODE_ARRAY_OFFSET(bit))
#define BMAP_TEST(bmap, bit)	((bmap)[NODE_ARRAY_INDEX(bit)] & (1 << NODE_ARRAY_OFFSET(bit)))
/* bitmap length; _PXM is at most 255 */
#define PXM_BITMAP_LEN (MAX_PXM_DOMAINS / 8)
static u8 __initdata pxm_bitmap[PXM_BITMAP_LEN];	/* bitmap of proximity domains */
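/*
 * Example: proximity domain 0x1a (26) maps to pxm_bitmap[3], bit 2,
 * since 26 / 8 == 3 and 26 % 8 == 2.
 */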

#define MAX_CHUNKS_PER_NODE	3
#define MAXCHUNKS		(MAX_CHUNKS_PER_NODE * MAX_NUMNODES)
struct node_memory_chunk_s {
	unsigned long	start_pfn;
	unsigned long	end_pfn;
	u8	pxm;		// proximity domain of node
	u8	nid;		// which cnode contains this chunk?
	u8	bank;		// which mem bank on this node
};
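/*
 * Note: 'bank' is never filled in by the SRAT parsing code in this file;
 * only 'start_pfn', 'end_pfn', 'pxm' and (later) 'nid' are used.
 */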
static struct node_memory_chunk_s __initdata node_memory_chunk[MAXCHUNKS];

static int __initdata num_memory_chunks; /* total number of memory chunks */
static u8 __initdata apicid_to_pxm[MAX_APICID];
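/*
 * apicid_to_pxm records the proximity domain reported for each local APIC
 * id; get_memcfg_from_srat() below translates it into apicid_2_node once
 * the pxm-to-node mapping is known.
 */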

int numa_off __initdata;
int acpi_numa __initdata;

static __init void bad_srat(void)
{
	printk(KERN_ERR "SRAT: SRAT not used.\n");
	acpi_numa = -1;
	num_memory_chunks = 0;
}

static __init inline int srat_disabled(void)
{
	return numa_off || acpi_numa < 0;
}

/* Identify CPU proximity domains */
void __init
acpi_numa_processor_affinity_init(struct acpi_srat_cpu_affinity *cpu_affinity)
{
	if (srat_disabled())
		return;
	if (cpu_affinity->header.length !=
	    sizeof(struct acpi_srat_cpu_affinity)) {
		bad_srat();
		return;
	}

	if ((cpu_affinity->flags & ACPI_SRAT_CPU_ENABLED) == 0)
		return;		/* empty entry */

	/* mark this node as "seen" in node bitmap */
	BMAP_SET(pxm_bitmap, cpu_affinity->proximity_domain_lo);

	apicid_to_pxm[cpu_affinity->apic_id] = cpu_affinity->proximity_domain_lo;

	printk(KERN_DEBUG "CPU %02x in proximity domain %02x\n",
		cpu_affinity->apic_id, cpu_affinity->proximity_domain_lo);
}

/*
 * Identify memory proximity domains and hot-remove capabilities.
 * Fill node memory chunk list structure.
 */
void __init
acpi_numa_memory_affinity_init(struct acpi_srat_mem_affinity *memory_affinity)
{
	unsigned long long paddr, size;
	unsigned long start_pfn, end_pfn;
	u8 pxm;
	struct node_memory_chunk_s *p, *q, *pend;

	if (srat_disabled())
		return;
	if (memory_affinity->header.length !=
	    sizeof(struct acpi_srat_mem_affinity)) {
		bad_srat();
		return;
	}

	if ((memory_affinity->flags & ACPI_SRAT_MEM_ENABLED) == 0)
		return;		/* empty entry */

	pxm = memory_affinity->proximity_domain & 0xff;

	/* mark this node as "seen" in node bitmap */
	BMAP_SET(pxm_bitmap, pxm);

	/* calculate info for memory chunk structure */
	paddr = memory_affinity->base_address;
	size = memory_affinity->length;

	start_pfn = paddr >> PAGE_SHIFT;
	end_pfn = (paddr + size) >> PAGE_SHIFT;
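	/*
	 * Example, assuming 4 KiB pages (PAGE_SHIFT == 12): a 1 GiB range at
	 * base 0x100000000 yields start_pfn 0x100000 and end_pfn 0x140000.
	 */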

	if (num_memory_chunks >= MAXCHUNKS) {
		printk(KERN_WARNING "Too many mem chunks in SRAT."
			" Ignoring %lld MBytes at %llx\n",
			size/(1024*1024), paddr);
		return;
	}

	/* Insertion sort based on base address */
	pend = &node_memory_chunk[num_memory_chunks];
	for (p = &node_memory_chunk[0]; p < pend; p++) {
		if (start_pfn < p->start_pfn)
			break;
	}
	if (p < pend) {
		/* shift existing chunks up one slot to make room at p,
		 * starting from the last valid entry so the copy never
		 * writes past the end of node_memory_chunk[] */
		for (q = pend - 1; q >= p; q--)
			*(q + 1) = *q;
	}
	p->start_pfn = start_pfn;
	p->end_pfn = end_pfn;
	p->pxm = pxm;

	num_memory_chunks++;

	printk(KERN_DEBUG "Memory range %08lx to %08lx"
			  " in proximity domain %02x %s\n",
		start_pfn, end_pfn,
		pxm,
		((memory_affinity->flags & ACPI_SRAT_MEM_HOT_PLUGGABLE) ?
		 "enabled and removable" : "enabled"));
}

/* Callback for SLIT parsing */
void __init acpi_numa_slit_init(struct acpi_table_slit *slit)
{
}

void acpi_numa_arch_fixup(void)
{
}

/*
 * The SRAT table always lists ascending addresses, so we can always
 * assume that the first "start" address that we see is the real
 * start of the node, and that the current "end" address is after
 * the previous one.
 */
static __init int node_read_chunk(int nid, struct node_memory_chunk_s *memory_chunk)
{
	/*
	 * Only add present memory as told by the e820.
	 * There is no guarantee from the SRAT that the memory it
	 * enumerates is present at boot time because it represents
	 * *possible* memory hotplug areas the same as normal RAM.
	 */
	if (memory_chunk->start_pfn >= max_pfn) {
		printk(KERN_INFO "Ignoring SRAT pfns: %08lx - %08lx\n",
			memory_chunk->start_pfn, memory_chunk->end_pfn);
		return -1;
	}
	if (memory_chunk->nid != nid)
		return -1;

	if (!node_has_online_mem(nid))
		node_start_pfn[nid] = memory_chunk->start_pfn;

	if (node_start_pfn[nid] > memory_chunk->start_pfn)
		node_start_pfn[nid] = memory_chunk->start_pfn;

	if (node_end_pfn[nid] < memory_chunk->end_pfn)
		node_end_pfn[nid] = memory_chunk->end_pfn;

	return 0;
}

int __init get_memcfg_from_srat(void)
{
	int i, j, nid;

	if (srat_disabled())
		goto out_fail;

	if (num_memory_chunks == 0) {
		printk(KERN_DEBUG
			"could not find any ACPI SRAT memory areas.\n");
		goto out_fail;
	}

	/* Calculate total number of nodes in system from PXM bitmap and create
	 * a set of sequential node IDs starting at zero.  (ACPI doesn't seem
	 * to specify the range of _PXM values.)
	 */
	/*
	 * MCD - we no longer HAVE to number nodes sequentially.  PXM domain
	 * numbers could go as high as 256, and MAX_NUMNODES for i386 is typically
	 * 32, so we will continue numbering them in this manner until MAX_NUMNODES
	 * approaches MAX_PXM_DOMAINS for i386.
	 */
	nodes_clear(node_online_map);
	for (i = 0; i < MAX_PXM_DOMAINS; i++) {
		if (BMAP_TEST(pxm_bitmap, i)) {
			int nid = acpi_map_pxm_to_node(i);
			node_set_online(nid);
		}
	}
	BUG_ON(num_online_nodes() == 0);

	/* set cnode id in memory chunk structure */
	for (i = 0; i < num_memory_chunks; i++)
		node_memory_chunk[i].nid = pxm_to_node(node_memory_chunk[i].pxm);

	printk(KERN_DEBUG "pxm bitmap: ");
	for (i = 0; i < sizeof(pxm_bitmap); i++) {
		printk(KERN_CONT "%02x ", pxm_bitmap[i]);
	}
	printk(KERN_CONT "\n");
	printk(KERN_DEBUG "Number of logical nodes in system = %d\n",
			num_online_nodes());
	printk(KERN_DEBUG "Number of memory chunks in system = %d\n",
			num_memory_chunks);

	for (i = 0; i < MAX_APICID; i++)
		apicid_2_node[i] = pxm_to_node(apicid_to_pxm[i]);

	for (j = 0; j < num_memory_chunks; j++) {
		struct node_memory_chunk_s *chunk = &node_memory_chunk[j];
		printk(KERN_DEBUG
			"chunk %d nid %d start_pfn %08lx end_pfn %08lx\n",
		       j, chunk->nid, chunk->start_pfn, chunk->end_pfn);
		if (node_read_chunk(chunk->nid, chunk))
			continue;

		e820_register_active_regions(chunk->nid, chunk->start_pfn,
					     min(chunk->end_pfn, max_pfn));
	}
	/* for out of order entries in SRAT */
	sort_node_map();

	for_each_online_node(nid) {
		unsigned long start = node_start_pfn[nid];
		unsigned long end = min(node_end_pfn[nid], max_pfn);

		memory_present(nid, start, end);
		node_remap_size[nid] = node_memmap_size_bytes(nid, start, end);
	}
	return 1;
out_fail:
	printk(KERN_DEBUG "failed to get NUMA memory information from SRAT"
			" table\n");
	return 0;
}