vm_kern.c revision 1817
/*
 * Copyright (c) 1991, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * The Mach Operating System project at Carnegie-Mellon University.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	from: @(#)vm_kern.c	8.3 (Berkeley) 1/12/94
 *
 *
 * Copyright (c) 1987, 1990 Carnegie-Mellon University.
 * All rights reserved.
 *
 * Authors: Avadis Tevanian, Jr., Michael Wayne Young
 *
 * Permission to use, copy, modify and distribute this software and
 * its documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND
 * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie the
 * rights to redistribute these changes.
 *
 * $Id$
 */

/*
 *	Kernel memory management.
 */

#include <sys/param.h>
#include <sys/systm.h>

#include <vm/vm.h>
#include <vm/vm_page.h>
#include <vm/vm_pageout.h>
#include <vm/vm_kern.h>

/*
 *	kmem_alloc_pageable:
 *
 *	Allocate pageable memory to the kernel's address map.
 *	map must be "kernel_map" below.
 */

vm_offset_t kmem_alloc_pageable(map, size)
	vm_map_t map;
	register vm_size_t size;
{
	vm_offset_t addr;
	register int result;

#if 0
	if (map != kernel_map)
		panic("kmem_alloc_pageable: not called with kernel_map");
#endif

	size = round_page(size);

	addr = vm_map_min(map);
	result = vm_map_find(map, NULL, (vm_offset_t) 0,
	    &addr, size, TRUE);
	if (result != KERN_SUCCESS) {
		return(0);
	}

	return(addr);
}
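/*
 * Example (hypothetical, compiled out): reserving pageable kernel
 * virtual address space with kmem_alloc_pageable().  The function
 * name "example_init" and the four-page size are placeholders, not
 * part of this file.
 */
#if 0
static void
example_init()
{
	vm_offset_t va;

	/* reserve four pages of pageable kernel VA in kernel_map */
	va = kmem_alloc_pageable(kernel_map, 4 * PAGE_SIZE);
	if (va == 0)
		panic("example_init: no kernel virtual space");
}
#endif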
/*
 *	Allocate wired-down memory in the kernel's address map
 *	or a submap.
 */
vm_offset_t kmem_alloc(map, size)
	register vm_map_t map;
	register vm_size_t size;
{
	vm_offset_t addr;
	register vm_offset_t offset;
	extern vm_object_t kernel_object;
	vm_offset_t i;

	size = round_page(size);

	/*
	 * Use the kernel object for wired-down kernel pages.
	 * Assume that no region of the kernel object is
	 * referenced more than once.
	 */

	/*
	 * Locate sufficient space in the map.  This will give us the
	 * final virtual address for the new memory, and thus will tell
	 * us the offset within the kernel map.
	 */
	vm_map_lock(map);
	if (vm_map_findspace(map, 0, size, &addr)) {
		vm_map_unlock(map);
		return (0);
	}
	offset = addr - VM_MIN_KERNEL_ADDRESS;
	vm_object_reference(kernel_object);
	vm_map_insert(map, kernel_object, offset, addr, addr + size);
	vm_map_unlock(map);

	/*
	 * Guarantee that there are pages already in this object
	 * before calling vm_map_pageable.  This is to prevent the
	 * following scenario:
	 *
	 *	1) Threads have swapped out, so that there is a
	 *	   pager for the kernel_object.
	 *	2) The kmsg zone is empty, and so we are kmem_allocing
	 *	   a new page for it.
	 *	3) vm_map_pageable calls vm_fault; there is no page,
	 *	   but there is a pager, so we call
	 *	   pager_data_request.  But the kmsg zone is empty,
	 *	   so we must kmem_alloc.
	 *	4) goto 1
	 *	5) Even if the kmsg zone is not empty: when we get
	 *	   the data back from the pager, it will be (very
	 *	   stale) non-zero data.  kmem_alloc is defined to
	 *	   return zero-filled memory.
	 *
	 * We're intentionally not activating the pages we allocate
	 * to prevent a race with page-out.  vm_map_pageable will wire
	 * the pages.
	 */

	vm_object_lock(kernel_object);
	for (i = 0; i < size; i += PAGE_SIZE) {
		vm_page_t mem;

		while ((mem = vm_page_alloc(kernel_object, offset + i)) == NULL) {
			vm_object_unlock(kernel_object);
			VM_WAIT;
			vm_object_lock(kernel_object);
		}
		vm_page_zero_fill(mem);
		mem->flags &= ~PG_BUSY;
	}
	vm_object_unlock(kernel_object);

	/*
	 * And finally, mark the data as non-pageable.
	 */

	(void) vm_map_pageable(map, (vm_offset_t) addr, addr + size, FALSE);

	/*
	 * Try to coalesce the map.
	 */

	vm_map_simplify(map, addr);

	return(addr);
}

/*
 *	kmem_free:
 *
 *	Release a region of kernel virtual memory allocated
 *	with kmem_alloc, and return the physical pages
 *	associated with that region.
 */
void kmem_free(map, addr, size)
	vm_map_t map;
	register vm_offset_t addr;
	vm_size_t size;
{
	(void) vm_map_remove(map, trunc_page(addr), round_page(addr + size));
}
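/*
 * Example (hypothetical, compiled out): a wired, zero-filled buffer
 * obtained with kmem_alloc() and later released with kmem_free().
 * The function name and the one-page size are placeholders.
 */
#if 0
static void
example_wired_buffer()
{
	vm_offset_t buf;

	buf = kmem_alloc(kernel_map, PAGE_SIZE);	/* zero-filled, wired */
	if (buf == 0)
		panic("example_wired_buffer: kmem_alloc failed");
	/* ... use the buffer ... */
	kmem_free(kernel_map, buf, PAGE_SIZE);	/* release VA and pages */
}
#endif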
/*
 *	kmem_suballoc:
 *
 *	Allocates a map to manage a subrange
 *	of the kernel virtual address space.
 *
 *	Arguments are as follows:
 *
 *	parent		Map to take range from
 *	min, max	Returned endpoints of map
 *	size		Size of range to find
 *	pageable	Can the region be paged
 */
vm_map_t kmem_suballoc(parent, min, max, size, pageable)
	register vm_map_t parent;
	vm_offset_t *min, *max;
	register vm_size_t size;
	boolean_t pageable;
{
	register int ret;
	vm_map_t result;

	size = round_page(size);

	*min = (vm_offset_t) vm_map_min(parent);
	ret = vm_map_find(parent, NULL, (vm_offset_t) 0,
	    min, size, TRUE);
	if (ret != KERN_SUCCESS) {
		printf("kmem_suballoc: bad status return of %d.\n", ret);
		panic("kmem_suballoc");
	}
	*max = *min + size;
	pmap_reference(vm_map_pmap(parent));
	result = vm_map_create(vm_map_pmap(parent), *min, *max, pageable);
	if (result == NULL)
		panic("kmem_suballoc: cannot create submap");
	if ((ret = vm_map_submap(parent, *min, *max, result)) != KERN_SUCCESS)
		panic("kmem_suballoc: unable to change range to submap");
	return(result);
}
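/*
 * Example (hypothetical, compiled out): carving a private submap out
 * of kernel_map at boot, in the style of the kmem_map and mb_map
 * setup done by machine-dependent startup code.  "example_map",
 * "example_min", "example_max", and the 16-page size are placeholder
 * names and values.
 */
#if 0
static vm_map_t example_map;

static void
example_submap_init()
{
	vm_offset_t example_min, example_max;

	example_map = kmem_suballoc(kernel_map, &example_min, &example_max,
	    16 * PAGE_SIZE, FALSE);
}
#endif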
/*
 * Allocate wired-down memory in the kernel's address map for the higher
 * level kernel memory allocator (kern/kern_malloc.c).  We cannot use
 * kmem_alloc() because we may need to allocate memory at interrupt
 * level where we cannot block (canwait == FALSE).
 *
 * This routine has its own private kernel submap (kmem_map) and object
 * (kmem_object).  This, combined with the fact that only malloc uses
 * this routine, ensures that we will never block in map or object waits.
 *
 * Note that this still only works in a uni-processor environment and
 * when called at splhigh().
 *
 * We don't worry about expanding the map (adding entries) since entries
 * for wired maps are statically allocated.
 */
vm_offset_t
kmem_malloc(map, size, canwait)
	register vm_map_t map;
	register vm_size_t size;
	boolean_t canwait;
{
	register vm_offset_t offset, i;
	vm_map_entry_t entry;
	vm_offset_t addr;
	vm_page_t m;
	extern vm_object_t kmem_object;

	if (map != kmem_map && map != mb_map)
		panic("kmem_malloc: map != {kmem,mb}_map");

	size = round_page(size);
	addr = vm_map_min(map);

	/*
	 * Locate sufficient space in the map.  This will give us the
	 * final virtual address for the new memory, and thus will tell
	 * us the offset within the kernel map.
	 */
	vm_map_lock(map);
	if (vm_map_findspace(map, 0, size, &addr)) {
		vm_map_unlock(map);
#if 0
		if (canwait)		/* XXX should wait */
			panic("kmem_malloc: %s too small",
			    map == kmem_map ? "kmem_map" : "mb_map");
#endif
		if (canwait)
			panic("kmem_malloc: map too small");
		return (0);
	}
	offset = addr - vm_map_min(kmem_map);
	vm_object_reference(kmem_object);
	vm_map_insert(map, kmem_object, offset, addr, addr + size);

	/*
	 * If we can wait, just mark the range as wired
	 * (will fault pages as necessary).
	 */
	if (canwait) {
		vm_map_unlock(map);
		(void) vm_map_pageable(map, (vm_offset_t) addr, addr + size,
		    FALSE);
		vm_map_simplify(map, addr);
		return(addr);
	}

	/*
	 * If we cannot wait then we must allocate all memory up front,
	 * pulling it off the active queue to prevent pageout.
	 */
	vm_object_lock(kmem_object);
	for (i = 0; i < size; i += PAGE_SIZE) {
		m = vm_page_alloc(kmem_object, offset + i);

		/*
		 * Ran out of space, free everything up and return.
		 * Don't need to lock page queues here as we know
		 * that the pages we got aren't on any queues.
		 */
		if (m == NULL) {
			while (i != 0) {
				i -= PAGE_SIZE;
				m = vm_page_lookup(kmem_object, offset + i);
				vm_page_free(m);
			}
			vm_object_unlock(kmem_object);
			vm_map_delete(map, addr, addr + size);
			vm_map_unlock(map);
			return(0);
		}
#if 0
		vm_page_zero_fill(m);
#endif
		m->flags &= ~PG_BUSY;
	}
	vm_object_unlock(kmem_object);

	/*
	 * Mark map entry as non-pageable.
	 * Assert: vm_map_insert() will never be able to extend the previous
	 * entry so there will be a new entry exactly corresponding to this
	 * address range and it will have wired_count == 0.
	 */
	if (!vm_map_lookup_entry(map, addr, &entry) ||
	    entry->start != addr || entry->end != addr + size ||
	    entry->wired_count)
		panic("kmem_malloc: entry not found or misaligned");
	entry->wired_count++;

	/*
	 * Loop through pages, entering them in the pmap.
	 * (We cannot add them to the wired count without
	 * wrapping the vm_page_queue_lock in splimp...)
	 */
	for (i = 0; i < size; i += PAGE_SIZE) {
		vm_object_lock(kmem_object);
		m = vm_page_lookup(kmem_object, offset + i);
		vm_object_unlock(kmem_object);
		pmap_enter(map->pmap, addr + i, VM_PAGE_TO_PHYS(m),
		    VM_PROT_DEFAULT, TRUE);
	}
	vm_map_unlock(map);

	vm_map_simplify(map, addr);
	return(addr);
}

/*
 *	kmem_alloc_wait
 *
 *	Allocates pageable memory from a sub-map of the kernel.  If the submap
 *	has no room, the caller sleeps waiting for more memory in the submap.
 */
vm_offset_t kmem_alloc_wait(map, size)
	vm_map_t map;
	vm_size_t size;
{
	vm_offset_t addr;

	size = round_page(size);

	for (;;) {
		/*
		 * To make this work for more than one map,
		 * use the map's lock to lock out sleepers/wakers.
		 */
		vm_map_lock(map);
		if (vm_map_findspace(map, 0, size, &addr) == 0)
			break;
		/* no space now; see if we can ever get space */
		if (vm_map_max(map) - vm_map_min(map) < size) {
			vm_map_unlock(map);
			return (0);
		}
		assert_wait((int)map, TRUE);
		vm_map_unlock(map);
		thread_block("kmaw");
	}
	vm_map_insert(map, NULL, (vm_offset_t)0, addr, addr + size);
	vm_map_unlock(map);
	return (addr);
}

/*
 *	kmem_free_wakeup
 *
 *	Returns memory to a submap of the kernel, and wakes up any threads
 *	waiting for memory in that map.
 */
void kmem_free_wakeup(map, addr, size)
	vm_map_t map;
	vm_offset_t addr;
	vm_size_t size;
{
	vm_map_lock(map);
	(void) vm_map_delete(map, trunc_page(addr), round_page(addr + size));
	thread_wakeup((int)map);
	vm_map_unlock(map);
}
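/*
 * Example (hypothetical, compiled out): the kmem_alloc_wait() /
 * kmem_free_wakeup() pair as a consumer of a submap would use it.
 * "example_submap" is a placeholder for a map created with
 * kmem_suballoc(); the two-page size is arbitrary.
 */
#if 0
static void
example_transient(example_submap)
	vm_map_t example_submap;
{
	vm_offset_t va;

	/* sleeps until the submap has room, rather than failing */
	va = kmem_alloc_wait(example_submap, 2 * PAGE_SIZE);
	/* ... use the space ... */
	/* free the range and wake any sleepers in kmem_alloc_wait */
	kmem_free_wakeup(example_submap, va, 2 * PAGE_SIZE);
}
#endif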
/*
 * Create the kernel map; insert a mapping covering kernel text, data, bss,
 * and all space allocated thus far (`bootstrap' data).  The new map will thus
 * map the range between VM_MIN_KERNEL_ADDRESS and `start' as allocated, and
 * the range between `start' and `end' as free.
 */
void kmem_init(start, end)
	vm_offset_t start, end;
{
	register vm_map_t m;

	m = vm_map_create(kernel_pmap, VM_MIN_KERNEL_ADDRESS, end, FALSE);
	vm_map_lock(m);
	/* N.B.: cannot use kgdb to debug, starting with this assignment ... */
	kernel_map = m;
	(void) vm_map_insert(m, NULL, (vm_offset_t)0,
	    VM_MIN_KERNEL_ADDRESS, start);
	/* ... and ending with the completion of the above `insert' */
	vm_map_unlock(m);
}
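/*
 * Example (hypothetical, compiled out): kmem_init() is called once by
 * machine-dependent startup code, with `start' the first free address
 * past the kernel image and bootstrap allocations and `end' the top of
 * the kernel virtual address range.  "virtual_avail" and "virtual_end"
 * are the conventional pmap-module names for these values; they are
 * assumptions here, not identifiers defined in this file.
 */
#if 0
	kmem_init(virtual_avail, virtual_end);
#endif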