uvm_km.c revision 1.52
/*	$NetBSD: uvm_km.c,v 1.52 2001/09/15 20:36:46 chs Exp $	*/

/*
 * Copyright (c) 1997 Charles D. Cranor and Washington University.
 * Copyright (c) 1991, 1993, The Regents of the University of California.
 *
 * All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * The Mach Operating System project at Carnegie-Mellon University.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by Charles D. Cranor,
 *	Washington University, the University of California, Berkeley and
 *	its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)vm_kern.c   8.3 (Berkeley) 1/12/94
 * from: Id: uvm_km.c,v 1.1.2.14 1998/02/06 05:19:27 chs Exp
 *
 *
 * Copyright (c) 1987, 1990 Carnegie-Mellon University.
 * All rights reserved.
 *
 * Permission to use, copy, modify and distribute this software and
 * its documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND
 * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie the
 * rights to redistribute these changes.
 */
#include "opt_uvmhist.h"

/*
 * uvm_km.c: handle kernel memory allocation and management
 */

/*
 * overview of kernel memory management:
 *
 * the kernel virtual address space is mapped by "kernel_map."  kernel_map
 * starts at VM_MIN_KERNEL_ADDRESS and goes to VM_MAX_KERNEL_ADDRESS.
 * note that VM_MIN_KERNEL_ADDRESS is equal to vm_map_min(kernel_map).
 *
 * the kernel_map has several "submaps."  submaps can only appear in
 * the kernel_map (user processes can't use them).  submaps "take over"
 * the management of a sub-range of the kernel's address space.  submaps
 * are typically allocated at boot time and are never released.  kernel
 * virtual address space that is mapped by a submap is locked by the
 * submap's lock -- not the kernel_map's lock.
 *
 * thus, the useful feature of submaps is that they allow us to break
 * up the locking and protection of the kernel address space into smaller
 * chunks.
 *
 * the vm system has several standard kernel submaps, including:
 *   kmem_map => contains only wired kernel memory for the kernel
 *		malloc.   *** access to kmem_map must be protected
 *		by splvm() because we are allowed to call malloc()
 *		at interrupt time ***
 *   mb_map => memory for large mbufs,  *** protected by splvm ***
 *   pager_map => used to map "buf" structures into kernel space
 *   exec_map => used during exec to handle exec args
 *   etc...
 *
 * the kernel allocates its private memory out of special uvm_objects whose
 * reference count is set to UVM_OBJ_KERN (thus indicating that the objects
 * are "special" and never die).  all kernel objects should be thought of
 * as large, fixed-sized, sparsely populated uvm_objects.  the size of each
 * kernel object is equal to the size of the kernel virtual address space
 * (i.e. the value "VM_MAX_KERNEL_ADDRESS - VM_MIN_KERNEL_ADDRESS").
 *
 * most kernel private memory lives in kernel_object.  the only exception
 * to this is for memory that belongs to submaps that must be protected
 * by splvm().  pages in these submaps are not assigned to an object.
 *
 * note that just because a kernel object spans the entire kernel virtual
 * address space doesn't mean that it has to be mapped into the entire space.
 * large chunks of a kernel object's space go unused either because
 * that area of kernel VM is unmapped, or there is some other type of
 * object mapped into that range (e.g. a vnode).  for submaps' kernel
 * objects, the only part of the object that can ever be populated is the
 * offsets that are managed by the submap.
 *
 * note that the "offset" in a kernel object is always the kernel virtual
 * address minus the VM_MIN_KERNEL_ADDRESS (aka vm_map_min(kernel_map)).
 * example:
 *   suppose VM_MIN_KERNEL_ADDRESS is 0xf8000000 and the kernel does a
 *   uvm_km_alloc(kernel_map, PAGE_SIZE) [allocate 1 wired down page in the
 *   kernel map].  if uvm_km_alloc returns virtual address 0xf8235000,
 *   then that means that the page at offset 0x235000 in kernel_object is
 *   mapped at 0xf8235000.
 *
 * kernel objects have one other special property: when the kernel virtual
 * memory mapping them is unmapped, the backing memory in the object is
 * freed right away.  this is done with the uvm_km_pgremove() function.
 * this has to be done because there is no backing store for kernel pages
 * and no need to save them after they are no longer referenced.
 */
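/*
 * a minimal sketch of the offset rule above (illustrative only; it uses
 * only names defined elsewhere in uvm, and assumes the caller holds the
 * object's vmobjlock before calling uvm_pagelookup):
 *
 *	vaddr_t kva = ...;	(* some kernel_object-backed address *)
 *	voff_t off = kva - vm_map_min(kernel_map);
 *	struct vm_page *pg = uvm_pagelookup(uvm.kernel_object, off);
 *
 * if kva is currently backed by kernel_object, pg is the vm_page mapped
 * there; with the numbers from the example above, a kva of 0xf8235000
 * looks up offset 0x235000.
 */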
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/proc.h>

#include <uvm/uvm.h>

/*
 * global data structures
 */

struct vm_map *kernel_map = NULL;

/*
 * local data structures
 */

static struct vm_map		kernel_map_store;

/*
 * uvm_km_init: init kernel maps and objects to reflect reality (i.e.
 * KVM already allocated for text, data, bss, and static data structures).
 *
 * => KVM is defined by VM_MIN_KERNEL_ADDRESS/VM_MAX_KERNEL_ADDRESS.
 *    we assume that [min -> start] has already been allocated and that
 *    "end" is the end.
 */

void
uvm_km_init(start, end)
	vaddr_t start, end;
{
	vaddr_t base = VM_MIN_KERNEL_ADDRESS;

	/*
	 * next, init kernel memory objects.
	 */

	/* kernel_object: for pageable anonymous kernel memory */
	uao_init();
	uvm.kernel_object = uao_create(VM_MAX_KERNEL_ADDRESS -
	    VM_MIN_KERNEL_ADDRESS, UAO_FLAG_KERNOBJ);

	/*
	 * init the map and reserve already allocated kernel space
	 * before installing.
	 */

	uvm_map_setup(&kernel_map_store, base, end, VM_MAP_PAGEABLE);
	kernel_map_store.pmap = pmap_kernel();
	if (uvm_map(&kernel_map_store, &base, start - base, NULL,
	    UVM_UNKNOWN_OFFSET, 0, UVM_MAPFLAG(UVM_PROT_ALL, UVM_PROT_ALL,
	    UVM_INH_NONE, UVM_ADV_RANDOM, UVM_FLAG_FIXED)) != 0)
		panic("uvm_km_init: could not reserve space for kernel");

	/*
	 * install!
	 */

	kernel_map = &kernel_map_store;
}

/*
 * uvm_km_suballoc: allocate a submap in the kernel map.  once a submap
 * is allocated all references to that area of VM must go through it.  this
 * allows the locking of VAs in kernel_map to be broken up into regions.
 *
 * => if `fixed' is true, *min specifies where the region described
 *    by the submap must start
 * => if submap is non NULL we use that as the submap, otherwise we
 *    alloc a new map
 */

struct vm_map *
uvm_km_suballoc(map, min, max, size, flags, fixed, submap)
	struct vm_map *map;
	vaddr_t *min, *max;		/* IN/OUT, OUT */
	vsize_t size;
	int flags;
	boolean_t fixed;
	struct vm_map *submap;
{
	int mapflags = UVM_FLAG_NOMERGE | (fixed ? UVM_FLAG_FIXED : 0);

	size = round_page(size);	/* round up to pagesize */

	/*
	 * first allocate a blank spot in the parent map
	 */

	if (uvm_map(map, min, size, NULL, UVM_UNKNOWN_OFFSET, 0,
	    UVM_MAPFLAG(UVM_PROT_ALL, UVM_PROT_ALL, UVM_INH_NONE,
	    UVM_ADV_RANDOM, mapflags)) != 0) {
		panic("uvm_km_suballoc: unable to allocate space in parent map");
	}

	/*
	 * set VM bounds (min is filled in by uvm_map)
	 */

	*max = *min + size;

	/*
	 * add references to pmap and create or init the submap
	 */

	pmap_reference(vm_map_pmap(map));
	if (submap == NULL) {
		submap = uvm_map_create(vm_map_pmap(map), *min, *max, flags);
		if (submap == NULL)
			panic("uvm_km_suballoc: unable to create submap");
	} else {
		uvm_map_setup(submap, *min, *max, flags);
		submap->pmap = vm_map_pmap(map);
	}

	/*
	 * now let uvm_map_submap plug it in...
	 */

	if (uvm_map_submap(map, *min, *max, submap) != 0)
		panic("uvm_km_suballoc: submap allocation failed");

	return(submap);
}
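/*
 * a hypothetical boot-time caller might carve out an interrupt-safe
 * submap like this (sketch only; the real kmem_map setup lives in the
 * kernel malloc init code and differs in detail):
 *
 *	vaddr_t kmin, kmax;
 *
 *	kmem_map = uvm_km_suballoc(kernel_map, &kmin, &kmax,
 *	    nkmempages << PAGE_SHIFT, VM_MAP_INTRSAFE, FALSE, NULL);
 *
 * passing FALSE for `fixed' lets uvm_map pick the region; kmin/kmax come
 * back describing the address range that the new submap now owns.
 */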
/*
 * uvm_km_pgremove: remove pages from a kernel uvm_object.
 *
 * => when you unmap a part of anonymous kernel memory you want to toss
 *    the pages right away.  (this gets called from uvm_unmap_...).
 */

void
uvm_km_pgremove(uobj, start, end)
	struct uvm_object *uobj;
	vaddr_t start, end;
{
	boolean_t by_list;
	struct vm_page *pg, *nextpg;
	voff_t curoff, nextoff;
	UVMHIST_FUNC("uvm_km_pgremove"); UVMHIST_CALLED(maphist);

	KASSERT(uobj->pgops == &aobj_pager);
	simple_lock(&uobj->vmobjlock);

	/* choose cheapest traversal */
	by_list = (uobj->uo_npages <=
	    ((end - start) >> PAGE_SHIFT) * UVM_PAGE_HASH_PENALTY);
	if (by_list)
		goto loop_by_list;

	for (curoff = start; curoff < end; curoff = nextoff) {
		nextoff = curoff + PAGE_SIZE;
		pg = uvm_pagelookup(uobj, curoff);
		if (pg == NULL) {
			continue;
		}
		if (pg->flags & PG_BUSY) {
			pg->flags |= PG_WANTED;
			UVM_UNLOCK_AND_WAIT(pg, &uobj->vmobjlock, 0,
			    "km_pgrm", 0);
			simple_lock(&uobj->vmobjlock);
			nextoff = curoff;
			continue;
		}

		/*
		 * free the swap slot, then the page.
		 */

		uao_dropswap(uobj, curoff >> PAGE_SHIFT);
		uvm_lock_pageq();
		uvm_pagefree(pg);
		uvm_unlock_pageq();
	}
	simple_unlock(&uobj->vmobjlock);
	return;

loop_by_list:
	for (pg = TAILQ_FIRST(&uobj->memq); pg != NULL; pg = nextpg) {
		nextpg = TAILQ_NEXT(pg, listq);
		if (pg->offset < start || pg->offset >= end) {
			continue;
		}
		if (pg->flags & PG_BUSY) {
			pg->flags |= PG_WANTED;
			UVM_UNLOCK_AND_WAIT(pg, &uobj->vmobjlock, 0,
			    "km_pgrm", 0);
			simple_lock(&uobj->vmobjlock);
			nextpg = TAILQ_FIRST(&uobj->memq);
			continue;
		}

		/*
		 * free the swap slot, then the page.
		 */

		uao_dropswap(uobj, pg->offset >> PAGE_SHIFT);
		uvm_lock_pageq();
		uvm_pagefree(pg);
		uvm_unlock_pageq();
	}
	simple_unlock(&uobj->vmobjlock);
}


/*
 * uvm_km_pgremove_intrsafe: like uvm_km_pgremove(), but for "intrsafe"
 * maps
 *
 * => when you unmap a part of anonymous kernel memory you want to toss
 *    the pages right away.  (this is called from uvm_unmap_...).
 * => none of the pages will ever be busy, and none of them will ever
 *    be on the active or inactive queues (because they have no object).
 */

void
uvm_km_pgremove_intrsafe(start, end)
	vaddr_t start, end;
{
	struct vm_page *pg;
	paddr_t pa;
	UVMHIST_FUNC("uvm_km_pgremove_intrsafe"); UVMHIST_CALLED(maphist);

	for (; start < end; start += PAGE_SIZE) {
		if (!pmap_extract(pmap_kernel(), start, &pa)) {
			continue;
		}
		pg = PHYS_TO_VM_PAGE(pa);
		KASSERT(pg);
		KASSERT(pg->uobject == NULL && pg->uanon == NULL);
		uvm_pagefree(pg);
	}
}
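/*
 * a rough sketch of how an unmap path might pick between the two removal
 * functions above (illustrative pseudo-caller only; the real dispatch is
 * done by the uvm_unmap machinery using the map's flags and the entry's
 * object, and the details there may differ):
 *
 *	if (map->flags & VM_MAP_INTRSAFE)
 *		uvm_km_pgremove_intrsafe(start, end);
 *	else if (uobj->uo_refs == UVM_OBJ_KERN)
 *		uvm_km_pgremove(uobj, start - vm_map_min(kernel_map),
 *		    end - vm_map_min(kernel_map));
 *
 * note that uvm_km_pgremove() takes object offsets, hence the
 * vm_map_min(kernel_map) subtraction described in the overview.
 */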
/*
 * uvm_km_kmemalloc: lower level kernel memory allocator for malloc()
 *
 * => we map wired memory into the specified map using the obj passed in
 * => NOTE: we can return NULL even if we can wait if there is not enough
 *	free VM space in the map... caller should be prepared to handle
 *	this case.
 * => we return KVA of memory allocated
 * => flags: NOWAIT, VALLOC - just allocate VA, TRYLOCK - fail if we can't
 *	lock the map
 */

vaddr_t
uvm_km_kmemalloc(map, obj, size, flags)
	struct vm_map *map;
	struct uvm_object *obj;
	vsize_t size;
	int flags;
{
	vaddr_t kva, loopva;
	vaddr_t offset;
	vsize_t loopsize;
	struct vm_page *pg;
	UVMHIST_FUNC("uvm_km_kmemalloc"); UVMHIST_CALLED(maphist);

	UVMHIST_LOG(maphist,"  (map=0x%x, obj=0x%x, size=0x%x, flags=%d)",
	    map, obj, size, flags);
	KASSERT(vm_map_pmap(map) == pmap_kernel());

	/*
	 * setup for call
	 */

	size = round_page(size);
	kva = vm_map_min(map);	/* hint */

	/*
	 * allocate some virtual space
	 */

	if (__predict_false(uvm_map(map, &kva, size, obj, UVM_UNKNOWN_OFFSET,
	    0, UVM_MAPFLAG(UVM_PROT_ALL, UVM_PROT_ALL, UVM_INH_NONE,
	    UVM_ADV_RANDOM, (flags & UVM_KMF_TRYLOCK))) != 0)) {
		UVMHIST_LOG(maphist, "<- done (no VM)",0,0,0,0);
		return(0);
	}

	/*
	 * if all we wanted was VA, return now
	 */

	if (flags & UVM_KMF_VALLOC) {
		UVMHIST_LOG(maphist,"<- done valloc (kva=0x%x)", kva,0,0,0);
		return(kva);
	}

	/*
	 * recover object offset from virtual address
	 */

	offset = kva - vm_map_min(kernel_map);
	UVMHIST_LOG(maphist, "  kva=0x%x, offset=0x%x", kva, offset,0,0);

	/*
	 * now allocate and map in the memory... note that we are the only
	 * ones who should ever get a handle on this area of VM.
	 */

	loopva = kva;
	loopsize = size;
	while (loopsize) {
		if (obj) {
			simple_lock(&obj->vmobjlock);
		}
		pg = uvm_pagealloc(obj, offset, NULL, UVM_PGA_USERESERVE);
		if (__predict_true(pg != NULL)) {
			pg->flags &= ~PG_BUSY;	/* new page */
			UVM_PAGE_OWN(pg, NULL);
		}
		if (obj) {
			simple_unlock(&obj->vmobjlock);
		}

		/*
		 * out of memory?
		 */

		if (__predict_false(pg == NULL)) {
			if (flags & UVM_KMF_NOWAIT) {
				/* free everything! */
				uvm_unmap(map, kva, kva + size);
				return(0);
			} else {
				uvm_wait("km_getwait2");	/* sleep here */
				continue;
			}
		}

		/*
		 * map it in
		 */

		if (obj == NULL) {
			pmap_kenter_pa(loopva, VM_PAGE_TO_PHYS(pg),
			    VM_PROT_ALL);
		} else {
			pmap_enter(map->pmap, loopva, VM_PAGE_TO_PHYS(pg),
			    UVM_PROT_ALL,
			    PMAP_WIRED | VM_PROT_READ | VM_PROT_WRITE);
		}
		loopva += PAGE_SIZE;
		offset += PAGE_SIZE;
		loopsize -= PAGE_SIZE;
	}

	pmap_update(pmap_kernel());

	UVMHIST_LOG(maphist,"<- done (kva=0x%x)", kva,0,0,0);
	return(kva);
}

/*
 * uvm_km_free: free an area of kernel memory
 */

void
uvm_km_free(map, addr, size)
	struct vm_map *map;
	vaddr_t addr;
	vsize_t size;
{
	uvm_unmap(map, trunc_page(addr), round_page(addr + size));
}

/*
 * uvm_km_free_wakeup: free an area of kernel memory and wake up
 * anyone waiting for vm space.
 *
 * => XXX: "wanted" bit + unlock&wait on other end?
 */

void
uvm_km_free_wakeup(map, addr, size)
	struct vm_map *map;
	vaddr_t addr;
	vsize_t size;
{
	struct vm_map_entry *dead_entries;

	vm_map_lock(map);
	uvm_unmap_remove(map, trunc_page(addr), round_page(addr + size),
	    &dead_entries);
	wakeup(map);
	vm_map_unlock(map);
	if (dead_entries != NULL)
		uvm_unmap_detach(dead_entries, 0);
}
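/*
 * sketch of a typical uvm_km_kmemalloc() caller, modeled loosely on the
 * kernel malloc (hypothetical fragment; the real one lives in
 * kern_malloc.c and carries more bookkeeping):
 *
 *	caddr_t va;
 *
 *	va = (caddr_t)uvm_km_kmemalloc(kmem_map, NULL, round_page(sz),
 *	    canwait ? 0 : UVM_KMF_NOWAIT);
 *	if (va == NULL)
 *		return (NULL);
 *
 * passing a NULL obj means the pages are entered with pmap_kenter_pa()
 * and belong to no object, which is what the splvm-protected kmem_map
 * requires (see the overview at the top of this file).
 */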
/*
 * uvm_km_alloc1: allocate wired down memory in the kernel map.
 *
 * => we can sleep if needed
 */

vaddr_t
uvm_km_alloc1(map, size, zeroit)
	struct vm_map *map;
	vsize_t size;
	boolean_t zeroit;
{
	vaddr_t kva, loopva, offset;
	struct vm_page *pg;
	UVMHIST_FUNC("uvm_km_alloc1"); UVMHIST_CALLED(maphist);

	UVMHIST_LOG(maphist,"(map=0x%x, size=0x%x)", map, size,0,0);
	KASSERT(vm_map_pmap(map) == pmap_kernel());

	size = round_page(size);
	kva = vm_map_min(map);		/* hint */

	/*
	 * allocate some virtual space
	 */

	if (__predict_false(uvm_map(map, &kva, size, uvm.kernel_object,
	    UVM_UNKNOWN_OFFSET, 0, UVM_MAPFLAG(UVM_PROT_ALL, UVM_PROT_ALL,
	    UVM_INH_NONE, UVM_ADV_RANDOM, 0)) != 0)) {
		UVMHIST_LOG(maphist,"<- done (no VM)",0,0,0,0);
		return(0);
	}

	/*
	 * recover object offset from virtual address
	 */

	offset = kva - vm_map_min(kernel_map);
	UVMHIST_LOG(maphist,"  kva=0x%x, offset=0x%x", kva, offset,0,0);

	/*
	 * now allocate the memory.
	 */

	loopva = kva;
	while (size) {
		simple_lock(&uvm.kernel_object->vmobjlock);
		KASSERT(uvm_pagelookup(uvm.kernel_object, offset) == NULL);
		pg = uvm_pagealloc(uvm.kernel_object, offset, NULL, 0);
		if (pg) {
			pg->flags &= ~PG_BUSY;
			UVM_PAGE_OWN(pg, NULL);
		}
		simple_unlock(&uvm.kernel_object->vmobjlock);
		if (pg == NULL) {
			uvm_wait("km_alloc1w");
			continue;
		}
		pmap_enter(map->pmap, loopva, VM_PAGE_TO_PHYS(pg),
		    UVM_PROT_ALL, PMAP_WIRED | VM_PROT_READ | VM_PROT_WRITE);
		loopva += PAGE_SIZE;
		offset += PAGE_SIZE;
		size -= PAGE_SIZE;
	}
	pmap_update(map->pmap);

	/*
	 * zero on request (note that "size" is now zero due to the above loop
	 * so we need to subtract kva from loopva to reconstruct the size).
	 */

	if (zeroit)
		memset((caddr_t)kva, 0, loopva - kva);
	UVMHIST_LOG(maphist,"<- done (kva=0x%x)", kva,0,0,0);
	return(kva);
}

/*
 * uvm_km_valloc: allocate zero-fill memory in the kernel's address space
 *
 * => memory is not allocated until fault time
 */

vaddr_t
uvm_km_valloc(map, size)
	struct vm_map *map;
	vsize_t size;
{
	return(uvm_km_valloc_align(map, size, 0));
}
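/*
 * the difference between the two allocators above, as a hypothetical
 * caller would see it (sketch only):
 *
 *	vaddr_t wired    = uvm_km_alloc1(kernel_map, PAGE_SIZE, TRUE);
 *	vaddr_t pageable = uvm_km_valloc(kernel_map, PAGE_SIZE);
 *
 * `wired' is backed by physical pages before the call returns (and has
 * been zeroed on request); `pageable' is just VA mapped to kernel_object,
 * and pages appear on first fault.
 */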
vaddr_t
uvm_km_valloc_align(map, size, align)
	struct vm_map *map;
	vsize_t size;
	vsize_t align;
{
	vaddr_t kva;
	UVMHIST_FUNC("uvm_km_valloc"); UVMHIST_CALLED(maphist);

	UVMHIST_LOG(maphist, "(map=0x%x, size=0x%x)", map, size, 0,0);
	KASSERT(vm_map_pmap(map) == pmap_kernel());

	size = round_page(size);
	kva = vm_map_min(map);		/* hint */

	/*
	 * allocate some virtual space.  will be demand filled
	 * by kernel_object.
	 */

	if (__predict_false(uvm_map(map, &kva, size, uvm.kernel_object,
	    UVM_UNKNOWN_OFFSET, align, UVM_MAPFLAG(UVM_PROT_ALL, UVM_PROT_ALL,
	    UVM_INH_NONE, UVM_ADV_RANDOM, 0)) != 0)) {
		UVMHIST_LOG(maphist, "<- done (no VM)", 0,0,0,0);
		return(0);
	}

	UVMHIST_LOG(maphist, "<- done (kva=0x%x)", kva,0,0,0);
	return(kva);
}

/*
 * uvm_km_valloc_wait: allocate zero-fill memory in the kernel's address space
 *
 * => memory is not allocated until fault time
 * => if no room in map, wait for space to free, unless requested size
 *    is larger than map (in which case we return 0)
 */

vaddr_t
uvm_km_valloc_prefer_wait(map, size, prefer)
	struct vm_map *map;
	vsize_t size;
	voff_t prefer;
{
	vaddr_t kva;
	UVMHIST_FUNC("uvm_km_valloc_prefer_wait"); UVMHIST_CALLED(maphist);

	UVMHIST_LOG(maphist, "(map=0x%x, size=0x%x)", map, size, 0,0);
	KASSERT(vm_map_pmap(map) == pmap_kernel());

	size = round_page(size);
	if (size > vm_map_max(map) - vm_map_min(map))
		return(0);

	for (;;) {
		kva = vm_map_min(map);		/* hint */

		/*
		 * allocate some virtual space.  will be demand filled
		 * by kernel_object.
		 */

		if (__predict_true(uvm_map(map, &kva, size, uvm.kernel_object,
		    prefer, 0, UVM_MAPFLAG(UVM_PROT_ALL, UVM_PROT_ALL,
		    UVM_INH_NONE, UVM_ADV_RANDOM, 0)) == 0)) {
			UVMHIST_LOG(maphist,"<- done (kva=0x%x)", kva,0,0,0);
			return(kva);
		}

		/*
		 * failed.  sleep for a while (on map)
		 */

		UVMHIST_LOG(maphist,"<<<sleeping>>>",0,0,0,0);
		tsleep((caddr_t)map, PVM, "vallocwait", 0);
	}
	/*NOTREACHED*/
}

vaddr_t
uvm_km_valloc_wait(map, size)
	struct vm_map *map;
	vsize_t size;
{
	return uvm_km_valloc_prefer_wait(map, size, UVM_UNKNOWN_OFFSET);
}

/* Sanity; must specify both or none. */
#if (defined(PMAP_MAP_POOLPAGE) || defined(PMAP_UNMAP_POOLPAGE)) && \
    (!defined(PMAP_MAP_POOLPAGE) || !defined(PMAP_UNMAP_POOLPAGE))
#error Must specify MAP and UNMAP together.
#endif

/*
 * uvm_km_alloc_poolpage: allocate a page for the pool allocator
 *
 * => if the pmap specifies an alternate mapping method, we use it.
 */

/* ARGSUSED */
vaddr_t
uvm_km_alloc_poolpage1(map, obj, waitok)
	struct vm_map *map;
	struct uvm_object *obj;
	boolean_t waitok;
{
#if defined(PMAP_MAP_POOLPAGE)
	struct vm_page *pg;
	vaddr_t va;

 again:
	pg = uvm_pagealloc(NULL, 0, NULL, UVM_PGA_USERESERVE);
	if (__predict_false(pg == NULL)) {
		if (waitok) {
			uvm_wait("plpg");
			goto again;
		} else
			return (0);
	}
	va = PMAP_MAP_POOLPAGE(VM_PAGE_TO_PHYS(pg));
	if (__predict_false(va == 0))
		uvm_pagefree(pg);
	return (va);
#else
	vaddr_t va;
	int s;

	/*
	 * NOTE: We may be called with a map that doesn't require splvm
	 * protection (e.g. kernel_map).  However, it does not hurt to
	 * go to splvm in this case (since unprotected maps will never be
	 * accessed in interrupt context).
	 *
	 * XXX We may want to consider changing the interface to this
	 * XXX function.
	 */

	s = splvm();
	va = uvm_km_kmemalloc(map, obj, PAGE_SIZE, waitok ? 0 : UVM_KMF_NOWAIT);
	splx(s);
	return (va);
#endif /* PMAP_MAP_POOLPAGE */
}
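/*
 * the pool allocator's backend page allocator is the expected caller
 * here; a hypothetical hook might look like this (sketch only -- the
 * real glue lives in subr_pool.c and may differ in detail):
 *
 *	void *
 *	pool_page_alloc(sz, flags, mtype)
 *		unsigned long sz; int flags, mtype;
 *	{
 *		boolean_t waitok = (flags & PR_WAITOK) ? TRUE : FALSE;
 *
 *		return ((void *)uvm_km_alloc_poolpage1(kmem_map, NULL,
 *		    waitok));
 *	}
 *
 * passing NULL for obj keeps the pages object-less, consistent with the
 * rule that pages in splvm-protected submaps have no object.
 */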
/*
 * uvm_km_free_poolpage: free a previously allocated pool page
 *
 * => if the pmap specifies an alternate unmapping method, we use it.
 */

/* ARGSUSED */
void
uvm_km_free_poolpage1(map, addr)
	struct vm_map *map;
	vaddr_t addr;
{
#if defined(PMAP_UNMAP_POOLPAGE)
	paddr_t pa;

	pa = PMAP_UNMAP_POOLPAGE(addr);
	uvm_pagefree(PHYS_TO_VM_PAGE(pa));
#else
	int s;

	/*
	 * NOTE: We may be called with a map that doesn't require splvm
	 * protection (e.g. kernel_map).  However, it does not hurt to
	 * go to splvm in this case (since unprotected maps will never be
	 * accessed in interrupt context).
	 *
	 * XXX We may want to consider changing the interface to this
	 * XXX function.
	 */

	s = splvm();
	uvm_km_free(map, addr, PAGE_SIZE);
	splx(s);
#endif /* PMAP_UNMAP_POOLPAGE */
}
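/*
 * matching free-side sketch for the hypothetical pool hook shown after
 * uvm_km_alloc_poolpage1() above:
 *
 *	void
 *	pool_page_free(v, sz, mtype)
 *		void *v; unsigned long sz; int mtype;
 *	{
 *		uvm_km_free_poolpage1(kmem_map, (vaddr_t)v);
 *	}
 *
 * the map must match the one the page was allocated from, since the
 * non-PMAP_UNMAP_POOLPAGE path unmaps the VA from that map.
 */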