/*
 * Copyright (c) 1991, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * The Mach Operating System project at Carnegie-Mellon University.
 *
 * Redistribution and use in source and binary forms, with or without

--- 47 unchanged lines hidden ---

 * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
 * School of Computer Science
 * Carnegie Mellon University
 * Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie the
 * rights to redistribute these changes.
 *
 * $Id: vm_kern.c,v 1.50 1998/09/04 08:06:57 dfr Exp $
 */

/*
 * Kernel memory management.
 */

#include <sys/param.h>
#include <sys/systm.h>

--- 103 unchanged lines hidden ---

	for (i = 0; i < size; i += PAGE_SIZE) {
		vm_page_t mem;

		mem = vm_page_grab(kernel_object, OFF_TO_IDX(offset + i),
		    VM_ALLOC_ZERO | VM_ALLOC_RETRY);
		if ((mem->flags & PG_ZERO) == 0)
			vm_page_zero_fill(mem);
		mem->valid = VM_PAGE_BITS_ALL;
		vm_page_flag_clear(mem, PG_ZERO);
		vm_page_wakeup(mem);
	}

	/*
	 * And finally, mark the data as non-pageable.
	 */

	(void) vm_map_pageable(map, (vm_offset_t) addr, addr + size, FALSE);

	return (addr);
}
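
/*
 * Usage sketch (illustrative; nbytes is a stand-in): a caller that
 * wants wired, zero-filled kernel memory pairs the two routines like
 * so:
 *
 *	vm_offset_t va;
 *
 *	va = kmem_alloc(kernel_map, round_page(nbytes));
 *	if (va == 0)
 *		-- kernel_map had no free range large enough --
 *	...
 *	kmem_free(kernel_map, va, round_page(nbytes));
 */
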
/*
 * kmem_free:
 *
 *	Release a region of kernel virtual memory allocated
 *	with kmem_alloc, and return the physical pages
 *	associated with that region.
 *
 *	This routine may not block on kernel maps.
 */
void
kmem_free(map, addr, size)
	vm_map_t map;
	register vm_offset_t addr;
	vm_size_t size;
{
	(void) vm_map_remove(map, trunc_page(addr), round_page(addr + size));

--- 36 unchanged lines hidden ---

	if (result == NULL)
		panic("kmem_suballoc: cannot create submap");
	if ((ret = vm_map_submap(parent, *min, *max, result)) != KERN_SUCCESS)
		panic("kmem_suballoc: unable to change range to submap");
	return (result);
}
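
/*
 * Usage sketch (illustrative; argument names inferred from the visible
 * tail of kmem_suballoc() above): a subsystem carves its own submap
 * out of kernel_map roughly like so, receiving the chosen address
 * range back through the min/max pointers:
 *
 *	vm_offset_t minaddr, maxaddr;
 *	vm_map_t submap;
 *
 *	submap = kmem_suballoc(kernel_map, &minaddr, &maxaddr, nbytes);
 *
 * kmem_map and mb_map, tested for in kmem_malloc() below, are both
 * submaps of this kind.
 */
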
/*
 * kmem_malloc:
 *
 *	Allocate wired-down memory in the kernel's address map for the higher
 *	level kernel memory allocator (kern/kern_malloc.c).  We cannot use
 *	kmem_alloc() because we may need to allocate memory at interrupt
 *	level where we cannot block (i.e., with M_NOWAIT set).
 *
 *	This routine has its own private kernel submap (kmem_map) and object
 *	(kmem_object).  This, combined with the fact that only malloc uses
 *	this routine, ensures that we will never block in map or object waits.
 *
 *	Note that this still only works in a uni-processor environment and
 *	when called at splhigh().
 *
 *	We don't worry about expanding the map (adding entries) since entries
 *	for wired maps are statically allocated.
 *
 *	NOTE: This routine is not supposed to block if M_NOWAIT is set, but
 *	I have not verified that it actually does not block.
 */
vm_offset_t
kmem_malloc(map, size, flags)
	register vm_map_t map;
	register vm_size_t size;
	int flags;
{
	register vm_offset_t offset, i;
	vm_map_entry_t entry;
	vm_offset_t addr;
	vm_page_t m;

	if (map != kmem_map && map != mb_map)
		panic("kmem_malloc: map != {kmem,mb}_map");

--- 9 unchanged lines hidden ---

	vm_map_lock(map);
	if (vm_map_findspace(map, vm_map_min(map), size, &addr)) {
		vm_map_unlock(map);
		if (map == mb_map) {
			mb_map_full = TRUE;
			printf("Out of mbuf clusters - adjust NMBCLUSTERS or increase maxusers!\n");
			return (0);
		}
		if ((flags & M_NOWAIT) == 0)
			panic("kmem_malloc(%d): kmem_map too small: %d total allocated",
			    size, map->size);
		return (0);
	}
	offset = addr - VM_MIN_KERNEL_ADDRESS;
	vm_object_reference(kmem_object);
	vm_map_insert(map, kmem_object, offset, addr, addr + size,
	    VM_PROT_ALL, VM_PROT_ALL, 0);

	for (i = 0; i < size; i += PAGE_SIZE) {
		/*
		 * Note: if M_NOWAIT is specified alone, allocate from
		 * interrupt-safe queues only (just the free list).  If
		 * M_ASLEEP or M_USE_RESERVE is also specified, we can also
		 * allocate from the cache.  Neither of the latter two
		 * flags may be specified from an interrupt since interrupts
		 * are not allowed to mess with the cache queue.
		 */
retry:
		m = vm_page_alloc(kmem_object, OFF_TO_IDX(offset + i),
		    ((flags & (M_NOWAIT|M_ASLEEP|M_USE_RESERVE)) == M_NOWAIT) ?
		    VM_ALLOC_INTERRUPT : VM_ALLOC_SYSTEM);
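
		/*
		 * In other words: M_NOWAIT with neither M_ASLEEP nor
		 * M_USE_RESERVE selects VM_ALLOC_INTERRUPT (free list
		 * only, safe at interrupt time); every other flag
		 * combination selects VM_ALLOC_SYSTEM, which may also
		 * draw from the cache queue.
		 */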

		/*
		 * Ran out of space, free everything up and return. Don't need
		 * to lock page queues here as we know that the pages we got
		 * aren't on any queues.
		 */
		if (m == NULL) {
			if ((flags & M_NOWAIT) == 0) {
				VM_WAIT;
				goto retry;
			}
			while (i != 0) {
				i -= PAGE_SIZE;
				m = vm_page_lookup(kmem_object,
				    OFF_TO_IDX(offset + i));
				vm_page_free(m);
			}
			vm_map_delete(map, addr, addr + size);
			vm_map_unlock(map);
			if (flags & M_ASLEEP) {
				VM_AWAIT;
			}
			return (0);
		}
		vm_page_flag_clear(m, PG_ZERO);
		m->valid = VM_PAGE_BITS_ALL;
	}

	/*
	 * Mark map entry as non-pageable.  Assert: vm_map_insert() will never

--- 13 unchanged lines hidden ---

	 * Loop thru pages, entering them in the pmap.  (We cannot add them to
	 * the wired count without wrapping the vm_page_queue_lock in
	 * splimp...)
	 */
	for (i = 0; i < size; i += PAGE_SIZE) {
		m = vm_page_lookup(kmem_object, OFF_TO_IDX(offset + i));
		vm_page_wire(m);
		vm_page_wakeup(m);
		/*
		 * Because this is kernel_pmap, this call will not block.
		 */
		pmap_enter(kernel_pmap, addr + i, VM_PAGE_TO_PHYS(m),
		    VM_PROT_ALL, 1);
		vm_page_flag_set(m, PG_MAPPED | PG_WRITEABLE | PG_REFERENCED);
	}
	vm_map_unlock(map);

	return (addr);
}
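
/*
 * Usage sketch (illustrative; allocsize is a stand-in): the malloc()
 * implementation in kern/kern_malloc.c invokes this routine roughly as
 *
 *	va = kmem_malloc(kmem_map, round_page(allocsize), flags);
 *	if (va == 0)
 *		-- handle failure --
 *
 * As the code above shows, a zero return can only happen when M_NOWAIT
 * is set or when mb_map fills up; a blocking caller either succeeds
 * after VM_WAIT or panics because kmem_map itself is too small.
 */
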
/*
 * kmem_alloc_wait:
 *
 *	Allocates pageable memory from a sub-map of the kernel.  If the submap
 *	has no room, the caller sleeps waiting for more memory in the submap.
 *
 *	This routine may block.
 */
vm_offset_t
kmem_alloc_wait(map, size)
	vm_map_t map;
	vm_size_t size;
{
	vm_offset_t addr;

	size = round_page(size);

--- 15 unchanged lines hidden ---

		tsleep(map, PVM, "kmaw", 0);
	}
	vm_map_insert(map, NULL, (vm_offset_t) 0, addr, addr + size,
	    VM_PROT_ALL, VM_PROT_ALL, 0);
	vm_map_unlock(map);
	return (addr);
}
/*
 * kmem_free_wakeup:
 *
 *	Returns memory to a submap of the kernel, and wakes up any processes
 *	waiting for memory in that map.
 */
void
kmem_free_wakeup(map, addr, size)
	vm_map_t map;
	vm_offset_t addr;
	vm_size_t size;
{
	vm_map_lock(map);
	(void) vm_map_delete(map, trunc_page(addr), round_page(addr + size));
	wakeup(map);
	vm_map_unlock(map);
}
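
/*
 * Usage sketch (illustrative; some_submap is a stand-in): the two
 * routines above work as a pair on space-limited submaps:
 *
 *	addr = kmem_alloc_wait(some_submap, round_page(len));
 *	...
 *	kmem_free_wakeup(some_submap, addr, round_page(len));
 *
 * The wakeup(map) in kmem_free_wakeup() is what unblocks the tsleep()
 * in kmem_alloc_wait() once space has been returned to a full submap.
 */
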
/*
 * kmem_init:
 *
 *	Create the kernel map; insert a mapping covering kernel text,
 *	data, bss, and all space allocated thus far (`bootstrap' data).  The
 *	new map will thus map the range between VM_MIN_KERNEL_ADDRESS and
 *	`start' as allocated, and the range between `start' and `end' as free.
 */
void
kmem_init(start, end)
	vm_offset_t start, end;
{
	register vm_map_t m;

	m = vm_map_create(kernel_pmap, VM_MIN_KERNEL_ADDRESS, end);
	vm_map_lock(m);
	/* N.B.: cannot use kgdb to debug, starting with this assignment ... */
	kernel_map = m;
	kernel_map->system_map = 1;
	(void) vm_map_insert(m, NULL, (vm_offset_t) 0,
	    VM_MIN_KERNEL_ADDRESS, start, VM_PROT_ALL, VM_PROT_ALL, 0);
	/* ... and ending with the completion of the above `insert' */
	vm_map_unlock(m);
}
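
/*
 * Usage sketch (illustrative): kmem_init() runs once during early VM
 * startup, before any of the allocators above can be used; the call is
 * typically of the form
 *
 *	kmem_init(virtual_avail, virtual_end);
 *
 * where the two addresses bracket the kernel virtual space left free
 * after the early bootstrap allocations described above.
 */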