/*
 * linux/arch/alpha/mm/numa.c
 *
 * DISCONTIGMEM NUMA alpha support.
 *
 * Copyright (C) 2001 Andrea Arcangeli <andrea@suse.de> SuSE
 */

#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/bootmem.h>
#include <linux/swap.h>
#include <linux/initrd.h>
#include <linux/pfn.h>
#include <linux/module.h>

#include <asm/hwrpb.h>
#include <asm/pgalloc.h>

pg_data_t node_data[MAX_NUMNODES];
bootmem_data_t node_bdata[MAX_NUMNODES];
EXPORT_SYMBOL(node_data);

#undef DEBUG_DISCONTIG
#ifdef DEBUG_DISCONTIG
#define DBGDCONT(args...) printk(args)
#else
#define DBGDCONT(args...)
#endif

#define for_each_mem_cluster(memdesc, cluster, i)		\
	for ((cluster) = (memdesc)->cluster, (i) = 0;		\
	     (i) < (memdesc)->numclusters; (i)++, (cluster)++)

static void __init show_mem_layout(void)
{
	struct memclust_struct * cluster;
	struct memdesc_struct * memdesc;
	int i;

	/* Find free clusters, and init and free the bootmem accordingly.  */
	memdesc = (struct memdesc_struct *)
	  (hwrpb->mddt_offset + (unsigned long) hwrpb);

	printk("Raw memory layout:\n");
	for_each_mem_cluster(memdesc, cluster, i) {
		printk(" memcluster %2d, usage %1lx, start %8lu, end %8lu\n",
		       i, cluster->usage, cluster->start_pfn,
		       cluster->start_pfn + cluster->numpages);
	}
}
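
/*
 * Set up the bootmem allocator for one node: intersect the HWRPB
 * memory clusters with the node's address range to find
 * node_min_pfn/node_max_pfn, reserve room at the bottom of the node
 * for its pg_data_t, pick a contiguous free region (avoiding the
 * kernel image) for the bootmem bitmap, then hand the usable page
 * ranges to bootmem and re-reserve the bitmap itself.
 */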
static void __init
setup_memory_node(int nid, void *kernel_end)
{
	extern unsigned long mem_size_limit;
	struct memclust_struct * cluster;
	struct memdesc_struct * memdesc;
	unsigned long start_kernel_pfn, end_kernel_pfn;
	unsigned long bootmap_size, bootmap_pages, bootmap_start;
	unsigned long start, end;
	unsigned long node_pfn_start, node_pfn_end;
	unsigned long node_min_pfn, node_max_pfn;
	int i;
	unsigned long node_datasz = PFN_UP(sizeof(pg_data_t));
	int show_init = 0;

	/* Find the bounds of current node */
	node_pfn_start = (node_mem_start(nid)) >> PAGE_SHIFT;
	node_pfn_end = node_pfn_start + (node_mem_size(nid) >> PAGE_SHIFT);

	/* Find free clusters, and init and free the bootmem accordingly.  */
	memdesc = (struct memdesc_struct *)
	  (hwrpb->mddt_offset + (unsigned long) hwrpb);

	/* find the bounds of this node (node_min_pfn/node_max_pfn) */
	node_min_pfn = ~0UL;
	node_max_pfn = 0UL;
	for_each_mem_cluster(memdesc, cluster, i) {
		/* Bit 0 is console/PALcode reserved.  Bit 1 is
		   non-volatile memory -- we might want to mark
		   this for later.  */
		if (cluster->usage & 3)
			continue;

		start = cluster->start_pfn;
		end = start + cluster->numpages;

		if (start >= node_pfn_end || end <= node_pfn_start)
			continue;

		if (!show_init) {
			show_init = 1;
			printk("Initializing bootmem allocator on Node ID %d\n", nid);
		}
		printk(" memcluster %2d, usage %1lx, start %8lu, end %8lu\n",
		       i, cluster->usage, cluster->start_pfn,
		       cluster->start_pfn + cluster->numpages);

		if (start < node_pfn_start)
			start = node_pfn_start;
		if (end > node_pfn_end)
			end = node_pfn_end;

		if (start < node_min_pfn)
			node_min_pfn = start;
		if (end > node_max_pfn)
			node_max_pfn = end;
	}

	if (mem_size_limit && node_max_pfn > mem_size_limit) {
		static int msg_shown = 0;
		if (!msg_shown) {
			msg_shown = 1;
			printk("setup: forcing memory size to %ldK (from %ldK).\n",
			       mem_size_limit << (PAGE_SHIFT - 10),
			       node_max_pfn << (PAGE_SHIFT - 10));
		}
		node_max_pfn = mem_size_limit;
	}

	if (node_min_pfn >= node_max_pfn)
		return;

	/* Update global {min,max}_low_pfn from node information. */
	if (node_min_pfn < min_low_pfn)
		min_low_pfn = node_min_pfn;
	if (node_max_pfn > max_low_pfn)
		max_pfn = max_low_pfn = node_max_pfn;

	num_physpages += node_max_pfn - node_min_pfn;

	/* Quasi-mark the pg_data_t as in-use */
	node_min_pfn += node_datasz;
	if (node_min_pfn >= node_max_pfn) {
		printk(" not enough mem to reserve NODE_DATA\n");
		return;
	}
	NODE_DATA(nid)->bdata = &node_bdata[nid];

	printk(" Detected node memory: start %8lu, end %8lu\n",
	       node_min_pfn, node_max_pfn);

	DBGDCONT(" DISCONTIG: node_data[%d] is at 0x%p\n", nid, NODE_DATA(nid));
	DBGDCONT(" DISCONTIG: NODE_DATA(%d)->bdata is at 0x%p\n", nid, NODE_DATA(nid)->bdata);

	/* Find the bounds of kernel memory. */
	start_kernel_pfn = PFN_DOWN(KERNEL_START_PHYS);
	end_kernel_pfn = PFN_UP(virt_to_phys(kernel_end));
	bootmap_start = -1;

	if (!nid && (node_max_pfn < end_kernel_pfn || node_min_pfn > start_kernel_pfn))
		panic("kernel loaded out of ram");

	/* Zone start phys-addr must be 2^(MAX_ORDER-1) aligned.
	   Note that we round this down, not up - node memory
	   has much larger alignment than 8Mb, so it's safe. */
	node_min_pfn &= ~((1UL << (MAX_ORDER-1))-1);

	/* We need to know how many physically contiguous pages
	   we'll need for the bootmap. */
	bootmap_pages = bootmem_bootmap_pages(node_max_pfn-node_min_pfn);

	/* Now find a good region where to allocate the bootmap. */
	for_each_mem_cluster(memdesc, cluster, i) {
		if (cluster->usage & 3)
			continue;

		start = cluster->start_pfn;
		end = start + cluster->numpages;

		if (start >= node_max_pfn || end <= node_min_pfn)
			continue;

		if (end > node_max_pfn)
			end = node_max_pfn;
		if (start < node_min_pfn)
			start = node_min_pfn;

		if (start < start_kernel_pfn) {
			if (end > end_kernel_pfn
			    && end - end_kernel_pfn >= bootmap_pages) {
				bootmap_start = end_kernel_pfn;
				break;
			} else if (end > start_kernel_pfn)
				end = start_kernel_pfn;
		} else if (start < end_kernel_pfn)
			start = end_kernel_pfn;
		if (end - start >= bootmap_pages) {
			bootmap_start = start;
			break;
		}
	}

	if (bootmap_start == -1)
		panic("couldn't find a contiguous place for the bootmap");
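
	/*
	 * init_bootmem_node() places the bootmem bitmap (one bit per
	 * page frame in [node_min_pfn, node_max_pfn)) at page frame
	 * bootmap_start, marks every page as reserved, and returns the
	 * bitmap size in bytes.  Rough sizing example, assuming the
	 * usual 8KB alpha page size: a node spanning 2GB covers
	 * 262144 page frames, so the bitmap is 32768 bytes and
	 * bootmap_pages works out to 4.
	 */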
	/* Allocate the bootmap and mark the whole MM as reserved. */
	bootmap_size = init_bootmem_node(NODE_DATA(nid), bootmap_start,
					 node_min_pfn, node_max_pfn);
	DBGDCONT(" bootmap_start %lu, bootmap_size %lu, bootmap_pages %lu\n",
		 bootmap_start, bootmap_size, bootmap_pages);

	/* Mark the free regions. */
	for_each_mem_cluster(memdesc, cluster, i) {
		if (cluster->usage & 3)
			continue;

		start = cluster->start_pfn;
		end = cluster->start_pfn + cluster->numpages;

		if (start >= node_max_pfn || end <= node_min_pfn)
			continue;

		if (end > node_max_pfn)
			end = node_max_pfn;
		if (start < node_min_pfn)
			start = node_min_pfn;

		if (start < start_kernel_pfn) {
			if (end > end_kernel_pfn) {
				free_bootmem_node(NODE_DATA(nid), PFN_PHYS(start),
						  (PFN_PHYS(start_kernel_pfn)
						   - PFN_PHYS(start)));
				printk(" freeing pages %ld:%ld\n",
				       start, start_kernel_pfn);
				start = end_kernel_pfn;
			} else if (end > start_kernel_pfn)
				end = start_kernel_pfn;
		} else if (start < end_kernel_pfn)
			start = end_kernel_pfn;
		if (start >= end)
			continue;

		free_bootmem_node(NODE_DATA(nid), PFN_PHYS(start), PFN_PHYS(end) - PFN_PHYS(start));
		printk(" freeing pages %ld:%ld\n", start, end);
	}

	/* Reserve the bootmap memory. */
	reserve_bootmem_node(NODE_DATA(nid), PFN_PHYS(bootmap_start), bootmap_size);
	printk(" reserving pages %ld:%ld\n", bootmap_start, bootmap_start+PFN_UP(bootmap_size));

	node_set_online(nid);
}

void __init
setup_memory(void *kernel_end)
{
	int nid;

	show_mem_layout();

	nodes_clear(node_online_map);

	min_low_pfn = ~0UL;
	max_low_pfn = 0UL;
	for (nid = 0; nid < MAX_NUMNODES; nid++)
		setup_memory_node(nid, kernel_end);

#ifdef CONFIG_BLK_DEV_INITRD
	initrd_start = INITRD_START;
	if (initrd_start) {
		extern void *move_initrd(unsigned long);

		initrd_end = initrd_start+INITRD_SIZE;
		printk("Initial ramdisk at: 0x%p (%lu bytes)\n",
		       (void *) initrd_start, INITRD_SIZE);

		if ((void *)initrd_end > phys_to_virt(PFN_PHYS(max_low_pfn))) {
			if (!move_initrd(PFN_PHYS(max_low_pfn)))
				printk("initrd extends beyond end of memory "
				       "(0x%08lx > 0x%p)\ndisabling initrd\n",
				       initrd_end,
				       phys_to_virt(PFN_PHYS(max_low_pfn)));
		} else {
			nid = kvaddr_to_nid(initrd_start);
			reserve_bootmem_node(NODE_DATA(nid),
					     virt_to_phys((void *)initrd_start),
					     INITRD_SIZE);
		}
	}
#endif /* CONFIG_BLK_DEV_INITRD */
}
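
/*
 * Build the per-node zones.  Within each node, page frames below
 * dma_local_pfn (the DMA limit interpreted as an offset from the
 * node's first page frame) go into ZONE_DMA and the remainder into
 * ZONE_NORMAL; free_area_init_node() then allocates the node's
 * mem_map from bootmem and initializes its zone structures.
 */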
void __init paging_init(void)
{
	unsigned int nid;
	unsigned long zones_size[MAX_NR_ZONES] = {0, };
	unsigned long dma_local_pfn;

	/*
	 * The old global MAX_DMA_ADDRESS per-arch API doesn't fit
	 * in the NUMA model, for now we convert it to a pfn and
	 * we interpret this pfn as a local per-node information.
	 * This issue isn't very important since none of these machines
	 * have legacy ISA slots anyways.
	 */
	dma_local_pfn = virt_to_phys((char *)MAX_DMA_ADDRESS) >> PAGE_SHIFT;

	for_each_online_node(nid) {
		unsigned long start_pfn = node_bdata[nid].node_boot_start >> PAGE_SHIFT;
		unsigned long end_pfn = node_bdata[nid].node_low_pfn;

		if (dma_local_pfn >= end_pfn - start_pfn)
			zones_size[ZONE_DMA] = end_pfn - start_pfn;
		else {
			zones_size[ZONE_DMA] = dma_local_pfn;
			zones_size[ZONE_NORMAL] = (end_pfn - start_pfn) - dma_local_pfn;
		}
		free_area_init_node(nid, NODE_DATA(nid), zones_size, start_pfn, NULL);
	}

	/* Initialize the kernel's ZERO_PGE. */
	memset((void *)ZERO_PGE, 0, PAGE_SIZE);
}

void __init mem_init(void)
{
	unsigned long codesize, reservedpages, datasize, initsize, pfn;
	extern int page_is_ram(unsigned long) __init;
	extern char _text, _etext, _data, _edata;
	extern char __init_begin, __init_end;
	unsigned long nid, i;
	high_memory = (void *) __va(max_low_pfn << PAGE_SHIFT);

	reservedpages = 0;
	for_each_online_node(nid) {
		/*
		 * This will free up the bootmem, ie, slot 0 memory
		 */
		totalram_pages += free_all_bootmem_node(NODE_DATA(nid));

		pfn = NODE_DATA(nid)->node_start_pfn;
		for (i = 0; i < node_spanned_pages(nid); i++, pfn++)
			if (page_is_ram(pfn) &&
			    PageReserved(nid_page_nr(nid, i)))
				reservedpages++;
	}

	codesize = (unsigned long) &_etext - (unsigned long) &_text;
	datasize = (unsigned long) &_edata - (unsigned long) &_data;
	initsize = (unsigned long) &__init_end - (unsigned long) &__init_begin;

	printk("Memory: %luk/%luk available (%luk kernel code, %luk reserved, "
	       "%luk data, %luk init)\n",
	       (unsigned long)nr_free_pages() << (PAGE_SHIFT-10),
	       num_physpages << (PAGE_SHIFT-10),
	       codesize >> 10,
	       reservedpages << (PAGE_SHIFT-10),
	       datasize >> 10,
	       initsize >> 10);
}

void
show_mem(void)
{
	long i, free = 0, total = 0, reserved = 0;
	long shared = 0, cached = 0;
	int nid;

	printk("\nMem-info:\n");
	show_free_areas();
	printk("Free swap: %6ldkB\n", nr_swap_pages<<(PAGE_SHIFT-10));
	for_each_online_node(nid) {
		unsigned long flags;
		pgdat_resize_lock(NODE_DATA(nid), &flags);
		i = node_spanned_pages(nid);
		while (i-- > 0) {
			struct page *page = nid_page_nr(nid, i);
			total++;
			if (PageReserved(page))
				reserved++;
			else if (PageSwapCache(page))
				cached++;
			else if (!page_count(page))
				free++;
			else
				shared += page_count(page) - 1;
		}
		pgdat_resize_unlock(NODE_DATA(nid), &flags);
	}
	printk("%ld pages of RAM\n", total);
	printk("%ld free pages\n", free);
	printk("%ld reserved pages\n", reserved);
	printk("%ld pages shared\n", shared);
	printk("%ld pages swap cached\n", cached);
}