/*
 * Copyright (c) 1991, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * The Mach Operating System project at Carnegie-Mellon University.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	from: @(#)vm_kern.c	8.3 (Berkeley) 1/12/94
 *
 *
 * Copyright (c) 1987, 1990 Carnegie-Mellon University.
 * All rights reserved.
 *
 * Authors: Avadis Tevanian, Jr., Michael Wayne Young
 *
 * Permission to use, copy, modify and distribute this software and
 * its documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND
 * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie the
 * rights to redistribute these changes.
 *
 * $Id: vm_kern.c,v 1.50 1998/09/04 08:06:57 dfr Exp $
 */

/*
 *	Kernel memory management.
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/proc.h>
#include <sys/malloc.h>

#include <vm/vm.h>
#include <vm/vm_param.h>
#include <vm/vm_prot.h>
#include <sys/lock.h>
#include <vm/pmap.h>
#include <vm/vm_map.h>
#include <vm/vm_object.h>
#include <vm/vm_page.h>
#include <vm/vm_pageout.h>
#include <vm/vm_extern.h>

vm_map_t kernel_map=0;
vm_map_t kmem_map=0;
vm_map_t exec_map=0;
vm_map_t clean_map=0;
vm_map_t u_map=0;
vm_map_t buffer_map=0;
vm_map_t mb_map=0;
int mb_map_full=0;
vm_map_t io_map=0;
vm_map_t phys_map=0;

/*
 *	kmem_alloc_pageable:
 *
 *	Allocate pageable memory to the kernel's address map.
 *	"map" must be kernel_map or a submap of kernel_map.
 */

vm_offset_t
kmem_alloc_pageable(map, size)
	vm_map_t map;
	register vm_size_t size;
{
	vm_offset_t addr;
	register int result;

	size = round_page(size);
	addr = vm_map_min(map);
	result = vm_map_find(map, NULL, (vm_offset_t) 0,
	    &addr, size, TRUE, VM_PROT_ALL, VM_PROT_ALL, 0);
	if (result != KERN_SUCCESS) {
		return (0);
	}
	return (addr);
}
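
/*
 * Editor's usage sketch (illustrative only, not compiled into this file):
 * a caller asking for pageable kernel VA might look like the following.
 * The four-page size and the use of kernel_map are assumptions made for
 * the example; failure is reported by a return value of 0.
 *
 *	vm_offset_t va;
 *
 *	va = kmem_alloc_pageable(kernel_map, 4 * PAGE_SIZE);
 *	if (va == 0)
 *		return (ENOMEM);
 */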

/*
 *	Allocate wired-down memory in the kernel's address map
 *	or a submap.
 */
vm_offset_t
kmem_alloc(map, size)
	register vm_map_t map;
	register vm_size_t size;
{
	vm_offset_t addr;
	register vm_offset_t offset;
	vm_offset_t i;

	size = round_page(size);

	/*
	 * Use the kernel object for wired-down kernel pages. Assume that no
	 * region of the kernel object is referenced more than once.
	 */

	/*
	 * Locate sufficient space in the map.  This will give us the final
	 * virtual address for the new memory, and thus will tell us the
	 * offset within the kernel map.
	 */
	vm_map_lock(map);
	if (vm_map_findspace(map, vm_map_min(map), size, &addr)) {
		vm_map_unlock(map);
		return (0);
	}
	offset = addr - VM_MIN_KERNEL_ADDRESS;
	vm_object_reference(kernel_object);
	vm_map_insert(map, kernel_object, offset, addr, addr + size,
	    VM_PROT_ALL, VM_PROT_ALL, 0);
	vm_map_unlock(map);

	/*
	 * Guarantee that there are pages already in this object before
	 * calling vm_map_pageable.  This is to prevent the following
	 * scenario:
	 *
	 * 1) Threads have swapped out, so that there is a pager for the
	 *    kernel_object.
	 * 2) The kmsg zone is empty, and so we are kmem_allocing a new page
	 *    for it.
	 * 3) vm_map_pageable calls vm_fault; there is no page, but there is
	 *    a pager, so we call pager_data_request.  But the kmsg zone is
	 *    empty, so we must kmem_alloc.
	 * 4) goto 1
	 * 5) Even if the kmsg zone is not empty: when we get the data back
	 *    from the pager, it will be (very stale) non-zero data.
	 *    kmem_alloc is defined to return zero-filled memory.
	 *
	 * We're intentionally not activating the pages we allocate to prevent
	 * a race with page-out.  vm_map_pageable will wire the pages.
	 */

	for (i = 0; i < size; i += PAGE_SIZE) {
		vm_page_t mem;

		mem = vm_page_grab(kernel_object, OFF_TO_IDX(offset + i),
		    VM_ALLOC_ZERO | VM_ALLOC_RETRY);
		if ((mem->flags & PG_ZERO) == 0)
			vm_page_zero_fill(mem);
		mem->valid = VM_PAGE_BITS_ALL;
		vm_page_flag_clear(mem, PG_ZERO);
		vm_page_wakeup(mem);
	}

	/*
	 * And finally, mark the data as non-pageable.
	 */

	(void) vm_map_pageable(map, (vm_offset_t) addr, addr + size, FALSE);

	return (addr);
}
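
/*
 * Editor's usage sketch (illustrative only, not compiled into this file):
 * kmem_alloc() hands back wired, zero-filled memory, and kmem_free()
 * below releases both the VA range and the physical pages backing it.
 * The one-page size is an assumption made for the example.
 *
 *	vm_offset_t buf;
 *
 *	buf = kmem_alloc(kernel_map, PAGE_SIZE);
 *	if (buf == 0)
 *		return (ENOMEM);
 *	...use the wired buffer...
 *	kmem_free(kernel_map, buf, PAGE_SIZE);
 */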

/*
 *	kmem_free:
 *
 *	Release a region of kernel virtual memory allocated
 *	with kmem_alloc, and return the physical pages
 *	associated with that region.
 *
 *	This routine may not block on kernel maps.
 */
void
kmem_free(map, addr, size)
	vm_map_t map;
	register vm_offset_t addr;
	vm_size_t size;
{
	(void) vm_map_remove(map, trunc_page(addr), round_page(addr + size));
}

/*
 *	kmem_suballoc:
 *
 *	Allocates a map to manage a subrange
 *	of the kernel virtual address space.
 *
 *	Arguments are as follows:
 *
 *	parent		Map to take range from
 *	size		Size of range to find
 *	min, max	Returned endpoints of map
 */
vm_map_t
kmem_suballoc(parent, min, max, size)
	register vm_map_t parent;
	vm_offset_t *min, *max;
	register vm_size_t size;
{
	register int ret;
	vm_map_t result;

	size = round_page(size);

	*min = (vm_offset_t) vm_map_min(parent);
	ret = vm_map_find(parent, NULL, (vm_offset_t) 0,
	    min, size, TRUE, VM_PROT_ALL, VM_PROT_ALL, 0);
	if (ret != KERN_SUCCESS) {
		printf("kmem_suballoc: bad status return of %d.\n", ret);
		panic("kmem_suballoc");
	}
	*max = *min + size;
	pmap_reference(vm_map_pmap(parent));
	result = vm_map_create(vm_map_pmap(parent), *min, *max);
	if (result == NULL)
		panic("kmem_suballoc: cannot create submap");
	if ((ret = vm_map_submap(parent, *min, *max, result)) != KERN_SUCCESS)
		panic("kmem_suballoc: unable to change range to submap");
	return (result);
}
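
/*
 * Editor's usage sketch (illustrative only, not compiled into this file):
 * this is the pattern used at startup to carve submaps out of kernel_map;
 * the 16MB size is an assumption made for the example.  Note that
 * kmem_suballoc() panics rather than returning NULL, so the caller needs
 * no error check.
 *
 *	vm_offset_t minaddr, maxaddr;
 *	vm_map_t submap;
 *
 *	submap = kmem_suballoc(kernel_map, &minaddr, &maxaddr,
 *	    16 * 1024 * 1024);
 */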

/*
 *	kmem_malloc:
 *
 *	Allocate wired-down memory in the kernel's address map for the higher
 *	level kernel memory allocator (kern/kern_malloc.c).  We cannot use
 *	kmem_alloc() because we may need to allocate memory at interrupt
 *	level where we cannot block (M_NOWAIT set in flags).
 *
 *	This routine has its own private kernel submap (kmem_map) and object
 *	(kmem_object).  This, combined with the fact that only malloc uses
 *	this routine, ensures that we will never block in map or object waits.
 *
 *	Note that this still only works in a uni-processor environment and
 *	when called at splhigh().
 *
 *	We don't worry about expanding the map (adding entries) since entries
 *	for wired maps are statically allocated.
 *
 *	NOTE:  This routine is not supposed to block if M_NOWAIT is set, but
 *	I have not verified that it actually does not block.
 */
vm_offset_t
kmem_malloc(map, size, flags)
	register vm_map_t map;
	register vm_size_t size;
	int flags;
{
	register vm_offset_t offset, i;
	vm_map_entry_t entry;
	vm_offset_t addr;
	vm_page_t m;

	if (map != kmem_map && map != mb_map)
		panic("kmem_malloc: map != {kmem,mb}_map");

	size = round_page(size);
	addr = vm_map_min(map);

	/*
	 * Locate sufficient space in the map.  This will give us the final
	 * virtual address for the new memory, and thus will tell us the
	 * offset within the kernel map.
	 */
	vm_map_lock(map);
	if (vm_map_findspace(map, vm_map_min(map), size, &addr)) {
		vm_map_unlock(map);
		if (map == mb_map) {
			mb_map_full = TRUE;
			printf("Out of mbuf clusters - adjust NMBCLUSTERS or increase maxusers!\n");
			return (0);
		}
		if ((flags & M_NOWAIT) == 0)
			panic("kmem_malloc(%d): kmem_map too small: %d total allocated",
			    size, map->size);
		return (0);
	}
	offset = addr - VM_MIN_KERNEL_ADDRESS;
	vm_object_reference(kmem_object);
	vm_map_insert(map, kmem_object, offset, addr, addr + size,
	    VM_PROT_ALL, VM_PROT_ALL, 0);

	for (i = 0; i < size; i += PAGE_SIZE) {
		/*
		 * Note: if M_NOWAIT specified alone, allocate from
		 * interrupt-safe queues only (just the free list).  If
		 * M_ASLEEP or M_USE_RESERVE is also specified, we can also
		 * allocate from the cache.  Neither of the latter two
		 * flags may be specified from an interrupt since interrupts
		 * are not allowed to mess with the cache queue.
		 */
retry:
		m = vm_page_alloc(kmem_object, OFF_TO_IDX(offset + i),
		    ((flags & (M_NOWAIT|M_ASLEEP|M_USE_RESERVE)) == M_NOWAIT) ?
			VM_ALLOC_INTERRUPT :
			VM_ALLOC_SYSTEM);

		/*
		 * Ran out of space, free everything up and return. Don't need
		 * to lock page queues here as we know that the pages we got
		 * aren't on any queues.
		 */
		if (m == NULL) {
			if ((flags & M_NOWAIT) == 0) {
				VM_WAIT;
				goto retry;
			}
			while (i != 0) {
				i -= PAGE_SIZE;
				m = vm_page_lookup(kmem_object,
				    OFF_TO_IDX(offset + i));
				vm_page_free(m);
			}
			vm_map_delete(map, addr, addr + size);
			vm_map_unlock(map);
			if (flags & M_ASLEEP) {
				VM_AWAIT;
			}
			return (0);
		}
		vm_page_flag_clear(m, PG_ZERO);
		m->valid = VM_PAGE_BITS_ALL;
	}

	/*
	 * Mark map entry as non-pageable. Assert: vm_map_insert() will never
	 * be able to extend the previous entry so there will be a new entry
	 * exactly corresponding to this address range and it will have
	 * wired_count == 0.
	 */
	if (!vm_map_lookup_entry(map, addr, &entry) ||
	    entry->start != addr || entry->end != addr + size ||
	    entry->wired_count)
		panic("kmem_malloc: entry not found or misaligned");
	entry->wired_count++;

	vm_map_simplify_entry(map, entry);

	/*
	 * Loop thru pages, entering them in the pmap. (We cannot add them to
	 * the wired count without wrapping the vm_page_queue_lock in
	 * splimp...)
	 */
	for (i = 0; i < size; i += PAGE_SIZE) {
		m = vm_page_lookup(kmem_object, OFF_TO_IDX(offset + i));
		vm_page_wire(m);
		vm_page_wakeup(m);
		/*
		 * Because this is kernel_pmap, this call will not block.
		 */
		pmap_enter(kernel_pmap, addr + i, VM_PAGE_TO_PHYS(m),
		    VM_PROT_ALL, 1);
		vm_page_flag_set(m, PG_MAPPED | PG_WRITEABLE | PG_REFERENCED);
	}
	vm_map_unlock(map);

	return (addr);
}
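
/*
 * Editor's usage sketch (illustrative only, not compiled into this file):
 * kmem_malloc() backs the kernel malloc() and accepts only kmem_map or
 * mb_map (anything else panics).  A caller running at interrupt level
 * passes M_NOWAIT and must handle a 0 return; "nbytes" is a hypothetical
 * variable introduced for the example.
 *
 *	vm_offset_t va;
 *
 *	va = kmem_malloc(kmem_map, round_page(nbytes), M_NOWAIT);
 *	if (va == 0)
 *		return (0);
 */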

/*
 *	kmem_alloc_wait:
 *
 *	Allocates pageable memory from a sub-map of the kernel.  If the submap
 *	has no room, the caller sleeps waiting for more memory in the submap.
 *
 *	This routine may block.
 */

vm_offset_t
kmem_alloc_wait(map, size)
	vm_map_t map;
	vm_size_t size;
{
	vm_offset_t addr;

	size = round_page(size);

	for (;;) {
		/*
		 * To make this work for more than one map, use the map's lock
		 * to lock out sleepers/wakers.
		 */
		vm_map_lock(map);
		if (vm_map_findspace(map, vm_map_min(map), size, &addr) == 0)
			break;
		/* no space now; see if we can ever get space */
		if (vm_map_max(map) - vm_map_min(map) < size) {
			vm_map_unlock(map);
			return (0);
		}
		vm_map_unlock(map);
		tsleep(map, PVM, "kmaw", 0);
	}
	vm_map_insert(map, NULL, (vm_offset_t) 0, addr, addr + size,
	    VM_PROT_ALL, VM_PROT_ALL, 0);
	vm_map_unlock(map);
	return (addr);
}

/*
 *	kmem_free_wakeup:
 *
 *	Returns memory to a submap of the kernel, and wakes up any processes
 *	waiting for memory in that map.
 */
void
kmem_free_wakeup(map, addr, size)
	vm_map_t map;
	vm_offset_t addr;
	vm_size_t size;
{
	vm_map_lock(map);
	(void) vm_map_delete(map, trunc_page(addr), round_page(addr + size));
	wakeup(map);
	vm_map_unlock(map);
}
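
/*
 * Editor's usage sketch (illustrative only, not compiled into this file):
 * kmem_alloc_wait() and kmem_free_wakeup() cooperate on a submap; every
 * release must go through kmem_free_wakeup() so that a thread sleeping
 * in kmem_alloc_wait() is woken.  exec_map is a plausible choice of
 * submap for the example, and "size" is assumed to be a byte count.
 *
 *	vm_offset_t va;
 *
 *	va = kmem_alloc_wait(exec_map, size);
 *	...use the pageable region...
 *	kmem_free_wakeup(exec_map, va, size);
 */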

/*
 *	kmem_init:
 *
 *	Create the kernel map; insert a mapping covering kernel text,
 *	data, bss, and all space allocated thus far (`bootstrap' data).  The
 *	new map will thus map the range between VM_MIN_KERNEL_ADDRESS and
 *	`start' as allocated, and the range between `start' and `end' as free.
 */

void
kmem_init(start, end)
	vm_offset_t start, end;
{
	register vm_map_t m;

	m = vm_map_create(kernel_pmap, VM_MIN_KERNEL_ADDRESS, end);
	vm_map_lock(m);
	/* N.B.: cannot use kgdb to debug, starting with this assignment ... */
	kernel_map = m;
	kernel_map->system_map = 1;
	(void) vm_map_insert(m, NULL, (vm_offset_t) 0,
	    VM_MIN_KERNEL_ADDRESS, start, VM_PROT_ALL, VM_PROT_ALL, 0);
	/* ... and ending with the completion of the above `insert' */
	vm_map_unlock(m);
}
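
/*
 * Editor's note (illustrative only, not compiled into this file): the VM
 * startup code is expected to call this exactly once, before any other
 * routine in this file, handing it the kernel VA range still unused at
 * that point, e.g.
 *
 *	kmem_init(virtual_avail, virtual_end);
 *
 * after which [VM_MIN_KERNEL_ADDRESS, virtual_avail) is recorded as
 * allocated and [virtual_avail, virtual_end) is free; the exact caller
 * and symbol names here are assumptions.
 */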