vm_kern.c revision 33758
/*
 * Copyright (c) 1991, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * The Mach Operating System project at Carnegie-Mellon University.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	from: @(#)vm_kern.c	8.3 (Berkeley) 1/12/94
 *
 *
 * Copyright (c) 1987, 1990 Carnegie-Mellon University.
 * All rights reserved.
 *
 * Authors: Avadis Tevanian, Jr., Michael Wayne Young
 *
 * Permission to use, copy, modify and distribute this software and
 * its documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND
 * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie the
 * rights to redistribute these changes.
 *
 * $Id: vm_kern.c,v 1.44 1998/02/06 12:14:23 eivind Exp $
 */

/*
 *	Kernel memory management.
 */
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/proc.h>
#include <sys/malloc.h>
#include <sys/syslog.h>

#include <vm/vm.h>
#include <vm/vm_param.h>
#include <vm/vm_prot.h>
#include <sys/lock.h>
#include <vm/pmap.h>
#include <vm/vm_map.h>
#include <vm/vm_object.h>
#include <vm/vm_page.h>
#include <vm/vm_pageout.h>
#include <vm/vm_extern.h>

vm_map_t kernel_map=0;
vm_map_t kmem_map=0;
vm_map_t exec_map=0;
vm_map_t clean_map=0;
vm_map_t u_map=0;
vm_map_t buffer_map=0;
vm_map_t mb_map=0;
int mb_map_full=0;
vm_map_t io_map=0;
vm_map_t phys_map=0;

/*
 *	kmem_alloc_pageable:
 *
 *	Allocate pageable memory to the kernel's address map.
 *	"map" must be kernel_map or a submap of kernel_map.
 */
vm_offset_t
kmem_alloc_pageable(map, size)
	vm_map_t map;
	register vm_size_t size;
{
	vm_offset_t addr;
	register int result;

	size = round_page(size);
	addr = vm_map_min(map);
	result = vm_map_find(map, NULL, (vm_offset_t) 0,
	    &addr, size, TRUE, VM_PROT_ALL, VM_PROT_ALL, 0);
	if (result != KERN_SUCCESS) {
		return (0);
	}
	return (addr);
}
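/*
 * Usage sketch (compiled out): how a caller might obtain pageable
 * kernel VA from kmem_alloc_pageable().  The function name and the
 * two-page size below are illustrative assumptions, not part of this
 * file's interface.
 */
#if 0
static vm_offset_t
example_pageable_alloc()
{
	vm_offset_t va;

	/* The size is rounded up to a page multiple internally. */
	va = kmem_alloc_pageable(kernel_map, 2 * PAGE_SIZE);
	if (va == 0)
		return (0);	/* no space in the map */
	return (va);
}
#endif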
/*
 *	Allocate wired-down memory in the kernel's address map
 *	or a submap.
 */
vm_offset_t
kmem_alloc(map, size)
	register vm_map_t map;
	register vm_size_t size;
{
	vm_offset_t addr;
	register vm_offset_t offset;
	vm_offset_t i;

	size = round_page(size);

	/*
	 * Use the kernel object for wired-down kernel pages. Assume that no
	 * region of the kernel object is referenced more than once.
	 */

	/*
	 * Locate sufficient space in the map. This will give us the final
	 * virtual address for the new memory, and thus will tell us the
	 * offset within the kernel map.
	 */
	vm_map_lock(map);
	if (vm_map_findspace(map, vm_map_min(map), size, &addr)) {
		vm_map_unlock(map);
		return (0);
	}
	offset = addr - VM_MIN_KERNEL_ADDRESS;
	vm_object_reference(kernel_object);
	vm_map_insert(map, kernel_object, offset, addr, addr + size,
	    VM_PROT_ALL, VM_PROT_ALL, 0);
	vm_map_unlock(map);

	/*
	 * Guarantee that there are pages already in this object before
	 * calling vm_map_pageable.  This is to prevent the following
	 * scenario:
	 *
	 * 1) Threads have swapped out, so that there is a pager for the
	 *    kernel_object.
	 * 2) The kmsg zone is empty, and so we are kmem_allocing a new
	 *    page for it.
	 * 3) vm_map_pageable calls vm_fault; there is no page, but there
	 *    is a pager, so we call pager_data_request.  But the kmsg
	 *    zone is empty, so we must kmem_alloc.
	 * 4) goto 1
	 * 5) Even if the kmsg zone is not empty: when we get the data
	 *    back from the pager, it will be (very stale) non-zero data.
	 *    kmem_alloc is defined to return zero-filled memory.
	 *
	 * We're intentionally not activating the pages we allocate to
	 * prevent a race with page-out.  vm_map_pageable will wire the
	 * pages.
	 */

	for (i = 0; i < size; i += PAGE_SIZE) {
		vm_page_t mem;

		mem = vm_page_grab(kernel_object, OFF_TO_IDX(offset + i),
		    VM_ALLOC_ZERO | VM_ALLOC_RETRY);
		if ((mem->flags & PG_ZERO) == 0)
			vm_page_zero_fill(mem);
		mem->flags &= ~(PG_BUSY|PG_ZERO);
		mem->valid = VM_PAGE_BITS_ALL;
	}

	/*
	 * And finally, mark the data as non-pageable.
	 */

	(void) vm_map_pageable(map, (vm_offset_t) addr, addr + size, FALSE);

	return (addr);
}

/*
 *	kmem_free:
 *
 *	Release a region of kernel virtual memory allocated
 *	with kmem_alloc, and return the physical pages
 *	associated with that region.
 */
void
kmem_free(map, addr, size)
	vm_map_t map;
	register vm_offset_t addr;
	vm_size_t size;
{
	(void) vm_map_remove(map, trunc_page(addr), round_page(addr + size));
}

/*
 *	kmem_suballoc:
 *
 *	Allocates a map to manage a subrange
 *	of the kernel virtual address space.
 *
 *	Arguments are as follows:
 *
 *	parent		Map to take range from
 *	min, max	Returned endpoints of map
 *	size		Size of range to find
 */
vm_map_t
kmem_suballoc(parent, min, max, size)
	register vm_map_t parent;
	vm_offset_t *min, *max;
	register vm_size_t size;
{
	register int ret;
	vm_map_t result;

	size = round_page(size);

	*min = (vm_offset_t) vm_map_min(parent);
	ret = vm_map_find(parent, NULL, (vm_offset_t) 0,
	    min, size, TRUE, VM_PROT_ALL, VM_PROT_ALL, 0);
	if (ret != KERN_SUCCESS) {
		printf("kmem_suballoc: bad status return of %d.\n", ret);
		panic("kmem_suballoc");
	}
	*max = *min + size;
	pmap_reference(vm_map_pmap(parent));
	result = vm_map_create(vm_map_pmap(parent), *min, *max);
	if (result == NULL)
		panic("kmem_suballoc: cannot create submap");
	if ((ret = vm_map_submap(parent, *min, *max, result)) != KERN_SUCCESS)
		panic("kmem_suballoc: unable to change range to submap");
	return (result);
}
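/*
 * Usage sketch (compiled out): carving a submap out of kernel_map with
 * kmem_suballoc(), in the style of the exec_map and similar submaps
 * declared at the top of this file.  The function name, the submap
 * variable, and the 16-page size are illustrative assumptions.
 */
#if 0
static vm_map_t example_submap;

static void
example_make_submap()
{
	vm_offset_t minaddr, maxaddr;

	/* Panics if the parent map has no room for the range. */
	example_submap = kmem_suballoc(kernel_map, &minaddr, &maxaddr,
	    16 * PAGE_SIZE);
}
#endif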
/*
 * Allocate wired-down memory in the kernel's address map for the higher
 * level kernel memory allocator (kern/kern_malloc.c).  We cannot use
 * kmem_alloc() because we may need to allocate memory at interrupt
 * level where we cannot block (canwait == FALSE).
 *
 * This routine has its own private kernel submap (kmem_map) and object
 * (kmem_object).  This, combined with the fact that only malloc uses
 * this routine, ensures that we will never block in map or object waits.
 *
 * Note that this still only works in a uni-processor environment and
 * when called at splhigh().
 *
 * We don't worry about expanding the map (adding entries) since entries
 * for wired maps are statically allocated.
 */
vm_offset_t
kmem_malloc(map, size, waitflag)
	register vm_map_t map;
	register vm_size_t size;
	boolean_t waitflag;
{
	register vm_offset_t offset, i;
	vm_map_entry_t entry;
	vm_offset_t addr;
	vm_page_t m;

	if (map != kmem_map && map != mb_map)
		panic("kmem_malloc: map != {kmem,mb}_map");

	size = round_page(size);
	addr = vm_map_min(map);

	/*
	 * Locate sufficient space in the map. This will give us the final
	 * virtual address for the new memory, and thus will tell us the
	 * offset within the kernel map.
	 */
	vm_map_lock(map);
	if (vm_map_findspace(map, vm_map_min(map), size, &addr)) {
		vm_map_unlock(map);
		if (map == mb_map) {
			mb_map_full = TRUE;
			log(LOG_ERR, "Out of mbuf clusters - increase maxusers!\n");
			return (0);
		}
		if (waitflag == M_WAITOK)
			panic("kmem_malloc(%d): kmem_map too small: %d total allocated",
			    size, map->size);
		return (0);
	}
	offset = addr - VM_MIN_KERNEL_ADDRESS;
	vm_object_reference(kmem_object);
	vm_map_insert(map, kmem_object, offset, addr, addr + size,
	    VM_PROT_ALL, VM_PROT_ALL, 0);

	for (i = 0; i < size; i += PAGE_SIZE) {
retry:
		m = vm_page_alloc(kmem_object, OFF_TO_IDX(offset + i),
		    (waitflag == M_NOWAIT) ? VM_ALLOC_INTERRUPT : VM_ALLOC_SYSTEM);

		/*
		 * Ran out of space, free everything up and return. Don't need
		 * to lock page queues here as we know that the pages we got
		 * aren't on any queues.
		 */
		if (m == NULL) {
			if (waitflag == M_WAITOK) {
				VM_WAIT;
				goto retry;
			}
			while (i != 0) {
				i -= PAGE_SIZE;
				m = vm_page_lookup(kmem_object,
				    OFF_TO_IDX(offset + i));
				vm_page_free(m);
			}
			vm_map_delete(map, addr, addr + size);
			vm_map_unlock(map);
			return (0);
		}
		m->flags &= ~PG_ZERO;
		m->valid = VM_PAGE_BITS_ALL;
	}

	/*
	 * Mark map entry as non-pageable. Assert: vm_map_insert() will never
	 * be able to extend the previous entry so there will be a new entry
	 * exactly corresponding to this address range and it will have
	 * wired_count == 0.
	 */
	if (!vm_map_lookup_entry(map, addr, &entry) ||
	    entry->start != addr || entry->end != addr + size ||
	    entry->wired_count)
		panic("kmem_malloc: entry not found or misaligned");
	entry->wired_count++;

	vm_map_simplify_entry(map, entry);

	/*
	 * Loop thru pages, entering them in the pmap. (We cannot add them to
	 * the wired count without wrapping the vm_page_queue_lock in
	 * splimp...)
	 */
	for (i = 0; i < size; i += PAGE_SIZE) {
		m = vm_page_lookup(kmem_object, OFF_TO_IDX(offset + i));
		vm_page_wire(m);
		PAGE_WAKEUP(m);
		pmap_enter(kernel_pmap, addr + i, VM_PAGE_TO_PHYS(m),
		    VM_PROT_ALL, 1);
		m->flags |= PG_MAPPED | PG_WRITEABLE | PG_REFERENCED;
	}
	vm_map_unlock(map);

	return (addr);
}

/*
 *	kmem_alloc_wait:
 *
 *	Allocates pageable memory from a sub-map of the kernel.  If the submap
 *	has no room, the caller sleeps waiting for more memory in the submap.
 */
vm_offset_t
kmem_alloc_wait(map, size)
	vm_map_t map;
	vm_size_t size;
{
	vm_offset_t addr;

	size = round_page(size);

	for (;;) {
		/*
		 * To make this work for more than one map, use the map's lock
		 * to lock out sleepers/wakers.
		 */
		vm_map_lock(map);
		if (vm_map_findspace(map, vm_map_min(map), size, &addr) == 0)
			break;
		/* no space now; see if we can ever get space */
		if (vm_map_max(map) - vm_map_min(map) < size) {
			vm_map_unlock(map);
			return (0);
		}
		vm_map_unlock(map);
		tsleep(map, PVM, "kmaw", 0);
	}
	vm_map_insert(map, NULL, (vm_offset_t) 0, addr, addr + size,
	    VM_PROT_ALL, VM_PROT_ALL, 0);
	vm_map_unlock(map);
	return (addr);
}
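/*
 * Usage sketch (compiled out): kmem_malloc() backs the kernel malloc()
 * in kern/kern_malloc.c and may only be handed kmem_map or mb_map.
 * The function name, the one-page size, and the M_NOWAIT flag are
 * illustrative assumptions.
 */
#if 0
static vm_offset_t
example_kmem_malloc()
{
	vm_offset_t va;

	/* M_NOWAIT permits use from interrupt level; 0 means failure. */
	va = kmem_malloc(kmem_map, PAGE_SIZE, M_NOWAIT);
	return (va);
}
#endif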
/*
 *	kmem_free_wakeup:
 *
 *	Returns memory to a submap of the kernel, and wakes up any processes
 *	waiting for memory in that map.
 */
void
kmem_free_wakeup(map, addr, size)
	vm_map_t map;
	vm_offset_t addr;
	vm_size_t size;
{
	vm_map_lock(map);
	(void) vm_map_delete(map, trunc_page(addr), round_page(addr + size));
	wakeup(map);
	vm_map_unlock(map);
}

/*
 * Create the kernel map; insert a mapping covering kernel text, data, bss,
 * and all space allocated thus far (`bootstrap' data).  The new map will thus
 * map the range between VM_MIN_KERNEL_ADDRESS and `start' as allocated, and
 * the range between `start' and `end' as free.
 */
void
kmem_init(start, end)
	vm_offset_t start, end;
{
	register vm_map_t m;

	m = vm_map_create(kernel_pmap, VM_MIN_KERNEL_ADDRESS, end);
	vm_map_lock(m);
	/* N.B.: cannot use kgdb to debug, starting with this assignment ... */
	kernel_map = m;
	kernel_map->system_map = 1;
	(void) vm_map_insert(m, NULL, (vm_offset_t) 0,
	    VM_MIN_KERNEL_ADDRESS, start, VM_PROT_ALL, VM_PROT_ALL, 0);
	/* ... and ending with the completion of the above `insert' */
	vm_map_unlock(m);
}
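/*
 * Usage sketch (compiled out): kmem_alloc_wait() and kmem_free_wakeup()
 * are meant to be used as a pair on the same submap; the allocating
 * side sleeps on the map until the freeing side calls wakeup().  The
 * function name and the four-page size are illustrative assumptions.
 */
#if 0
static void
example_wait_pair(submap)
	vm_map_t submap;
{
	vm_offset_t va;

	/* Sleeps ("kmaw") until space appears, unless it can never fit. */
	va = kmem_alloc_wait(submap, 4 * PAGE_SIZE);
	if (va == 0)
		return;		/* request larger than the whole map */
	/* ... use the pageable region ... */
	kmem_free_wakeup(submap, va, 4 * PAGE_SIZE);
}
#endif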