/*	$NetBSD: uvm_km.c,v 1.71 2005/01/01 21:02:13 yamt Exp $	*/

/*
 * Copyright (c) 1997 Charles D. Cranor and Washington University.
 * Copyright (c) 1991, 1993, The Regents of the University of California.
 *
 * All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * The Mach Operating System project at Carnegie-Mellon University.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by Charles D. Cranor,
 *	Washington University, the University of California, Berkeley and
 *	its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * @(#)vm_kern.c    8.3 (Berkeley) 1/12/94
 * from: Id: uvm_km.c,v 1.1.2.14 1998/02/06 05:19:27 chs Exp
 *
 *
 * Copyright (c) 1987, 1990 Carnegie-Mellon University.
 * All rights reserved.
 *
 * Permission to use, copy, modify and distribute this software and
 * its documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND
 * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie the
 * rights to redistribute these changes.
 */

/*
 * uvm_km.c: handle kernel memory allocation and management
 */

/*
 * overview of kernel memory management:
 *
 * the kernel virtual address space is mapped by "kernel_map."  kernel_map
 * starts at VM_MIN_KERNEL_ADDRESS and goes to VM_MAX_KERNEL_ADDRESS.
 * note that VM_MIN_KERNEL_ADDRESS is equal to vm_map_min(kernel_map).
 *
 * the kernel_map has several "submaps."  submaps can only appear in
 * the kernel_map (user processes can't use them).  submaps "take over"
 * the management of a sub-range of the kernel's address space.  submaps
 * are typically allocated at boot time and are never released.  kernel
 * virtual address space that is mapped by a submap is locked by the
 * submap's lock -- not the kernel_map's lock.
 *
 * thus, the useful feature of submaps is that they allow us to break
 * up the locking and protection of the kernel address space into smaller
 * chunks.
 *
 * the vm system has several standard kernel submaps, including:
 *   kmem_map => contains only wired kernel memory for the kernel
 *		malloc.  *** access to kmem_map must be protected
 *		by splvm() because we are allowed to call malloc()
 *		at interrupt time ***
 *   mb_map => memory for large mbufs,  *** protected by splvm ***
 *   pager_map => used to map "buf" structures into kernel space
 *   exec_map => used during exec to handle exec args
 *   etc...
 *
 * the kernel allocates its private memory out of special uvm_objects whose
 * reference count is set to UVM_OBJ_KERN (thus indicating that the objects
 * are "special" and never die).  all kernel objects should be thought of
 * as large, fixed-sized, sparsely populated uvm_objects.  each kernel
 * object is equal to the size of kernel virtual address space (i.e. the
 * value "VM_MAX_KERNEL_ADDRESS - VM_MIN_KERNEL_ADDRESS").
 *
 * most kernel private memory lives in kernel_object.  the only exception
 * to this is for memory that belongs to submaps that must be protected
 * by splvm().  pages in these submaps are not assigned to an object.
 *
 * note that just because a kernel object spans the entire kernel virtual
 * address space doesn't mean that it has to be mapped into the entire space.
 * large chunks of a kernel object's space go unused either because
 * that area of kernel VM is unmapped, or there is some other type of
 * object mapped into that range (e.g. a vnode).  for submaps' kernel
 * objects, the only part of the object that can ever be populated is the
 * offsets that are managed by the submap.
 *
 * note that the "offset" in a kernel object is always the kernel virtual
 * address minus the VM_MIN_KERNEL_ADDRESS (aka vm_map_min(kernel_map)).
 * example:
 *   suppose VM_MIN_KERNEL_ADDRESS is 0xf8000000 and the kernel does a
 *   uvm_km_alloc(kernel_map, PAGE_SIZE) [allocate 1 wired down page in the
 *   kernel map].  if uvm_km_alloc returns virtual address 0xf8235000,
 *   then that means that the page at offset 0x235000 in kernel_object is
 *   mapped at 0xf8235000.
 *
 * kernel objects have one other special property: when the kernel virtual
 * memory mapping them is unmapped, the backing memory in the object is
 * freed right away.  this is done with the uvm_km_pgremove() function.
 * this has to be done because there is no backing store for kernel pages
 * and no need to save them after they are no longer referenced.
 */
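
/*
 * illustrative sketch (not part of the original file): the offset rule
 * above is a one-line conversion.  "kva_to_kobj_offset" is a
 * hypothetical name used only for this example:
 *
 *	static __inline voff_t
 *	kva_to_kobj_offset(vaddr_t kva)
 *	{
 *
 *		return (voff_t)(kva - vm_map_min(kernel_map));
 *	}
 *
 * with VM_MIN_KERNEL_ADDRESS == 0xf8000000, kva_to_kobj_offset(0xf8235000)
 * yields 0x235000, matching the worked example above.
 */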

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: uvm_km.c,v 1.71 2005/01/01 21:02:13 yamt Exp $");

#include "opt_uvmhist.h"

#include <sys/param.h>
#include <sys/malloc.h>
#include <sys/systm.h>
#include <sys/proc.h>

#include <uvm/uvm.h>

/*
 * global data structures
 */

struct vm_map *kernel_map = NULL;

/*
 * local data structures
 */

static struct vm_map_kernel kernel_map_store;
static struct vm_map_entry kernel_first_mapent_store;

/*
 * uvm_km_init: init kernel maps and objects to reflect reality (i.e.
 * KVM already allocated for text, data, bss, and static data structures).
 *
 * => KVM is defined by VM_MIN_KERNEL_ADDRESS/VM_MAX_KERNEL_ADDRESS.
 *    we assume that [min -> start] has already been allocated and that
 *    "end" is the end.
 */

void
uvm_km_init(start, end)
	vaddr_t start, end;
{
	vaddr_t base = VM_MIN_KERNEL_ADDRESS;

	/*
	 * next, init kernel memory objects.
	 */

	/* kernel_object: for pageable anonymous kernel memory */
	uao_init();
	uvm.kernel_object = uao_create(VM_MAX_KERNEL_ADDRESS -
	    VM_MIN_KERNEL_ADDRESS, UAO_FLAG_KERNOBJ);

	/*
	 * init the map and reserve any space that might already
	 * have been allocated kernel space before installing.
	 */

	uvm_map_setup_kernel(&kernel_map_store, base, end, VM_MAP_PAGEABLE);
	kernel_map_store.vmk_map.pmap = pmap_kernel();
	if (start != base) {
		int error;
		struct uvm_map_args args;

		error = uvm_map_prepare(&kernel_map_store.vmk_map,
		    base, start - base,
		    NULL, UVM_UNKNOWN_OFFSET, 0,
		    UVM_MAPFLAG(UVM_PROT_ALL, UVM_PROT_ALL, UVM_INH_NONE,
		    UVM_ADV_RANDOM, UVM_FLAG_FIXED), &args);
		if (!error) {
			kernel_first_mapent_store.flags =
			    UVM_MAP_KERNEL | UVM_MAP_FIRST;
			error = uvm_map_enter(&kernel_map_store.vmk_map, &args,
			    &kernel_first_mapent_store);
		}

		if (error)
			panic(
			    "uvm_km_init: could not reserve space for kernel");
	}

	/*
	 * install!
	 */

	kernel_map = &kernel_map_store.vmk_map;
}
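
/*
 * illustrative sketch (an assumption, not from this file): uvm_init()
 * makes this call once during startup, with bounds ultimately derived
 * from pmap_virtual_space(), roughly:
 *
 *	vaddr_t kvm_start, kvm_end;
 *
 *	pmap_virtual_space(&kvm_start, &kvm_end);
 *	uvm_km_init(kvm_start, kvm_end);
 *
 * everything in [VM_MIN_KERNEL_ADDRESS, kvm_start) is then recorded as
 * kernel virtual space that is already in use.
 */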

/*
 * uvm_km_suballoc: allocate a submap in the kernel map.  once a submap
 * is allocated all references to that area of VM must go through it.  this
 * allows the locking of VAs in kernel_map to be broken up into regions.
 *
 * => if `fixed' is true, *min specifies where the region described
 *    by the submap must start
 * => if submap is non NULL we use that as the submap, otherwise we
 *    alloc a new map
 */

struct vm_map *
uvm_km_suballoc(map, min, max, size, flags, fixed, submap)
	struct vm_map *map;
	vaddr_t *min, *max;		/* IN/OUT, OUT */
	vsize_t size;
	int flags;
	boolean_t fixed;
	struct vm_map_kernel *submap;
{
	int mapflags = UVM_FLAG_NOMERGE | (fixed ? UVM_FLAG_FIXED : 0);

	KASSERT(vm_map_pmap(map) == pmap_kernel());

	size = round_page(size);	/* round up to pagesize */

	/*
	 * first allocate a blank spot in the parent map
	 */

	if (uvm_map(map, min, size, NULL, UVM_UNKNOWN_OFFSET, 0,
	    UVM_MAPFLAG(UVM_PROT_ALL, UVM_PROT_ALL, UVM_INH_NONE,
	    UVM_ADV_RANDOM, mapflags)) != 0) {
		panic("uvm_km_suballoc: unable to allocate space in parent map");
	}

	/*
	 * set VM bounds (min is filled in by uvm_map)
	 */

	*max = *min + size;

	/*
	 * add references to pmap and create or init the submap
	 */

	pmap_reference(vm_map_pmap(map));
	if (submap == NULL) {
		submap = malloc(sizeof(*submap), M_VMMAP, M_WAITOK);
		if (submap == NULL)
			panic("uvm_km_suballoc: unable to create submap");
	}
	uvm_map_setup_kernel(submap, *min, *max, flags);
	submap->vmk_map.pmap = vm_map_pmap(map);

	/*
	 * now let uvm_map_submap plug it in...
	 */

	if (uvm_map_submap(map, *min, *max, &submap->vmk_map) != 0)
		panic("uvm_km_suballoc: submap allocation failed");

	return(&submap->vmk_map);
}
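
/*
 * illustrative sketch: a typical boot-time submap creation, modeled on
 * the kmem_map setup (exact size and flags vary by caller and are
 * assumptions here):
 *
 *	static struct vm_map_kernel kmem_map_store;
 *	vaddr_t kmembase, kmemlimit;
 *
 *	kmem_map = uvm_km_suballoc(kernel_map, &kmembase, &kmemlimit,
 *	    (vsize_t)(nkmempages << PAGE_SHIFT), VM_MAP_INTRSAFE,
 *	    FALSE, &kmem_map_store);
 *
 * on return, [kmembase, kmemlimit) is locked and managed by the new
 * submap rather than by kernel_map itself.
 */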

/*
 * uvm_km_pgremove: remove pages from a kernel uvm_object.
 *
 * => when you unmap a part of anonymous kernel memory you want to toss
 *    the pages right away.  (this gets called from uvm_unmap_...).
 */

void
uvm_km_pgremove(uobj, start, end)
	struct uvm_object *uobj;
	vaddr_t start, end;
{
	struct vm_page *pg;
	voff_t curoff, nextoff;
	int swpgonlydelta = 0;
	UVMHIST_FUNC("uvm_km_pgremove"); UVMHIST_CALLED(maphist);

	KASSERT(uobj->pgops == &aobj_pager);
	simple_lock(&uobj->vmobjlock);

	for (curoff = start; curoff < end; curoff = nextoff) {
		nextoff = curoff + PAGE_SIZE;
		pg = uvm_pagelookup(uobj, curoff);
		if (pg != NULL && pg->flags & PG_BUSY) {
			pg->flags |= PG_WANTED;
			UVM_UNLOCK_AND_WAIT(pg, &uobj->vmobjlock, 0,
			    "km_pgrm", 0);
			simple_lock(&uobj->vmobjlock);
			nextoff = curoff;
			continue;
		}

		/*
		 * free the swap slot, then the page.
		 */

		if (pg == NULL &&
		    uao_find_swslot(uobj, curoff >> PAGE_SHIFT) > 0) {
			swpgonlydelta++;
		}
		uao_dropswap(uobj, curoff >> PAGE_SHIFT);
		if (pg != NULL) {
			uvm_lock_pageq();
			uvm_pagefree(pg);
			uvm_unlock_pageq();
		}
	}
	simple_unlock(&uobj->vmobjlock);

	if (swpgonlydelta > 0) {
		simple_lock(&uvm.swap_data_lock);
		KASSERT(uvmexp.swpgonly >= swpgonlydelta);
		uvmexp.swpgonly -= swpgonlydelta;
		simple_unlock(&uvm.swap_data_lock);
	}
}


/*
 * uvm_km_pgremove_intrsafe: like uvm_km_pgremove(), but for "intrsafe"
 *    maps
 *
 * => when you unmap a part of anonymous kernel memory you want to toss
 *    the pages right away.  (this is called from uvm_unmap_...).
 * => none of the pages will ever be busy, and none of them will ever
 *    be on the active or inactive queues (because they have no object).
 */

void
uvm_km_pgremove_intrsafe(start, end)
	vaddr_t start, end;
{
	struct vm_page *pg;
	paddr_t pa;
	UVMHIST_FUNC("uvm_km_pgremove_intrsafe"); UVMHIST_CALLED(maphist);

	for (; start < end; start += PAGE_SIZE) {
		if (!pmap_extract(pmap_kernel(), start, &pa)) {
			continue;
		}
		pg = PHYS_TO_VM_PAGE(pa);
		KASSERT(pg);
		KASSERT(pg->uobject == NULL && pg->uanon == NULL);
		uvm_pagefree(pg);
	}
}
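
/*
 * illustrative sketch: neither function above is normally called
 * directly.  tearing down a kernel mapping, e.g.
 *
 *	uvm_unmap(kernel_map, va, va + size);
 *
 * ends up in uvm_unmap_remove(), which invokes uvm_km_pgremove() (or
 * uvm_km_pgremove_intrsafe() for intrsafe maps) so that the backing
 * pages are freed immediately.
 */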


/*
 * uvm_km_kmemalloc: lower level kernel memory allocator for malloc()
 *
 * => we map wired memory into the specified map using the obj passed in
 * => NOTE: we can return NULL even if we can wait if there is not enough
 *    free VM space in the map... caller should be prepared to handle
 *    this case.
 * => we return KVA of memory allocated
 * => align,prefer - passed on to uvm_map()
 * => flags: NOWAIT, VALLOC - just allocate VA, TRYLOCK - fail if we can't
 *    lock the map
 */

vaddr_t
uvm_km_kmemalloc1(map, obj, size, align, prefer, flags)
	struct vm_map *map;
	struct uvm_object *obj;
	vsize_t size;
	vsize_t align;
	voff_t prefer;
	int flags;
{
	vaddr_t kva, loopva;
	vaddr_t offset;
	vsize_t loopsize;
	struct vm_page *pg;
	UVMHIST_FUNC("uvm_km_kmemalloc"); UVMHIST_CALLED(maphist);

	UVMHIST_LOG(maphist,"  (map=0x%x, obj=0x%x, size=0x%x, flags=%d)",
	    map, obj, size, flags);
	KASSERT(vm_map_pmap(map) == pmap_kernel());

	/*
	 * setup for call
	 */

	size = round_page(size);
	kva = vm_map_min(map);	/* hint */

	/*
	 * allocate some virtual space
	 */

	if (__predict_false(uvm_map(map, &kva, size, obj, prefer, align,
	    UVM_MAPFLAG(UVM_PROT_ALL, UVM_PROT_ALL, UVM_INH_NONE,
	    UVM_ADV_RANDOM,
	    (flags & (UVM_KMF_TRYLOCK | UVM_KMF_NOWAIT))
	    | UVM_FLAG_QUANTUM))
	    != 0)) {
		UVMHIST_LOG(maphist, "<- done (no VM)",0,0,0,0);
		return(0);
	}

	/*
	 * if all we wanted was VA, return now
	 */

	if (flags & UVM_KMF_VALLOC) {
		UVMHIST_LOG(maphist,"<- done valloc (kva=0x%x)", kva,0,0,0);
		return(kva);
	}

	/*
	 * recover object offset from virtual address
	 */

	offset = kva - vm_map_min(kernel_map);
	UVMHIST_LOG(maphist, "  kva=0x%x, offset=0x%x", kva, offset,0,0);

	/*
	 * now allocate and map in the memory... note that we are the only ones
	 * who should ever get a handle on this area of VM.
	 */

	loopva = kva;
	loopsize = size;
	while (loopsize) {
		if (obj) {
			simple_lock(&obj->vmobjlock);
		}
		pg = uvm_pagealloc(obj, offset, NULL, UVM_PGA_USERESERVE);
		if (__predict_true(pg != NULL)) {
			pg->flags &= ~PG_BUSY;	/* new page */
			UVM_PAGE_OWN(pg, NULL);
		}
		if (obj) {
			simple_unlock(&obj->vmobjlock);
		}

		/*
		 * out of memory?
		 */

		if (__predict_false(pg == NULL)) {
			if ((flags & UVM_KMF_NOWAIT) ||
			    ((flags & UVM_KMF_CANFAIL) && uvm_swapisfull())) {
				/* free everything! */
				uvm_unmap(map, kva, kva + size);
				return (0);
			} else {
				uvm_wait("km_getwait2");	/* sleep here */
				continue;
			}
		}

		/*
		 * map it in
		 */

		if (obj == NULL) {
			pmap_kenter_pa(loopva, VM_PAGE_TO_PHYS(pg),
			    VM_PROT_READ | VM_PROT_WRITE);
		} else {
			pmap_enter(map->pmap, loopva, VM_PAGE_TO_PHYS(pg),
			    UVM_PROT_ALL,
			    PMAP_WIRED | VM_PROT_READ | VM_PROT_WRITE);
		}
		loopva += PAGE_SIZE;
		offset += PAGE_SIZE;
		loopsize -= PAGE_SIZE;
	}

	pmap_update(pmap_kernel());

	UVMHIST_LOG(maphist,"<- done (kva=0x%x)", kva,0,0,0);
	return(kva);
}

/*
 * uvm_km_free: free an area of kernel memory
 */

void
uvm_km_free(map, addr, size)
	struct vm_map *map;
	vaddr_t addr;
	vsize_t size;
{
	uvm_unmap(map, trunc_page(addr), round_page(addr+size));
}

/*
 * uvm_km_free_wakeup: free an area of kernel memory and wake up
 * anyone waiting for vm space.
 *
 * => XXX: "wanted" bit + unlock&wait on other end?
 */

void
uvm_km_free_wakeup(map, addr, size)
	struct vm_map *map;
	vaddr_t addr;
	vsize_t size;
{
	struct vm_map_entry *dead_entries;

	vm_map_lock(map);
	uvm_unmap_remove(map, trunc_page(addr), round_page(addr + size),
	    &dead_entries, NULL);
	wakeup(map);
	vm_map_unlock(map);
	if (dead_entries != NULL)
		uvm_unmap_detach(dead_entries, 0);
}
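
/*
 * illustrative sketch: a wired allocation and its release using the
 * interfaces above.  obj is NULL here because intrsafe submaps such as
 * kmem_map keep their pages off any object (see the overview):
 *
 *	vaddr_t va;
 *
 *	va = uvm_km_kmemalloc(kmem_map, NULL, PAGE_SIZE, UVM_KMF_NOWAIT);
 *	if (va == 0)
 *		return (ENOMEM);
 *	...
 *	uvm_km_free(kmem_map, va, PAGE_SIZE);
 */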

/*
 * uvm_km_alloc1: allocate wired down memory in the kernel map.
 *
 * => we can sleep if needed
 */

vaddr_t
uvm_km_alloc1(map, size, zeroit)
	struct vm_map *map;
	vsize_t size;
	boolean_t zeroit;
{
	vaddr_t kva, loopva, offset;
	struct vm_page *pg;
	UVMHIST_FUNC("uvm_km_alloc1"); UVMHIST_CALLED(maphist);

	UVMHIST_LOG(maphist,"(map=0x%x, size=0x%x)", map, size,0,0);
	KASSERT(vm_map_pmap(map) == pmap_kernel());

	size = round_page(size);
	kva = vm_map_min(map);		/* hint */

	/*
	 * allocate some virtual space
	 */

	if (__predict_false(uvm_map(map, &kva, size, uvm.kernel_object,
	    UVM_UNKNOWN_OFFSET, 0, UVM_MAPFLAG(UVM_PROT_ALL, UVM_PROT_ALL,
	    UVM_INH_NONE, UVM_ADV_RANDOM,
	    UVM_FLAG_QUANTUM)) != 0)) {
		UVMHIST_LOG(maphist,"<- done (no VM)",0,0,0,0);
		return(0);
	}

	/*
	 * recover object offset from virtual address
	 */

	offset = kva - vm_map_min(kernel_map);
	UVMHIST_LOG(maphist,"  kva=0x%x, offset=0x%x", kva, offset,0,0);

	/*
	 * now allocate the memory.
	 */

	loopva = kva;
	while (size) {
		simple_lock(&uvm.kernel_object->vmobjlock);
		KASSERT(uvm_pagelookup(uvm.kernel_object, offset) == NULL);
		pg = uvm_pagealloc(uvm.kernel_object, offset, NULL, 0);
		if (pg) {
			pg->flags &= ~PG_BUSY;
			UVM_PAGE_OWN(pg, NULL);
		}
		simple_unlock(&uvm.kernel_object->vmobjlock);
		if (pg == NULL) {
			uvm_wait("km_alloc1w");
			continue;
		}
		pmap_enter(map->pmap, loopva, VM_PAGE_TO_PHYS(pg),
		    UVM_PROT_ALL, PMAP_WIRED | VM_PROT_READ | VM_PROT_WRITE);
		loopva += PAGE_SIZE;
		offset += PAGE_SIZE;
		size -= PAGE_SIZE;
	}
	pmap_update(map->pmap);

	/*
	 * zero on request (note that "size" is now zero due to the above loop
	 * so we need to subtract kva from loopva to reconstruct the size).
	 */

	if (zeroit)
		memset((caddr_t)kva, 0, loopva - kva);
	UVMHIST_LOG(maphist,"<- done (kva=0x%x)", kva,0,0,0);
	return(kva);
}

/*
 * uvm_km_valloc1: allocate zero-fill memory in the kernel's address space
 *
 * => memory is not allocated until fault time
 * => the align, prefer and flags parameters are passed on to uvm_map().
 *
 * Note: this function is also the backend for these macros:
 *	uvm_km_valloc
 *	uvm_km_valloc_wait
 *	uvm_km_valloc_prefer
 *	uvm_km_valloc_prefer_wait
 *	uvm_km_valloc_align
 */

vaddr_t
uvm_km_valloc1(map, size, align, prefer, flags)
	struct vm_map *map;
	vsize_t size;
	vsize_t align;
	voff_t prefer;
	uvm_flag_t flags;
{
	vaddr_t kva;
	UVMHIST_FUNC("uvm_km_valloc1"); UVMHIST_CALLED(maphist);

	UVMHIST_LOG(maphist, "(map=0x%x, size=0x%x, align=0x%x, prefer=0x%x)",
	    map, size, align, prefer);

	KASSERT(vm_map_pmap(map) == pmap_kernel());

	size = round_page(size);
	/*
	 * Check if requested size is larger than the map, in which
	 * case we can't succeed.
	 */
	if (size > vm_map_max(map) - vm_map_min(map))
		return (0);

	flags |= UVM_FLAG_QUANTUM;
	for (;;) {
		kva = vm_map_min(map);		/* hint */

		/*
		 * allocate some virtual space.  will be demand filled
		 * by kernel_object.
		 */

		if (__predict_true(uvm_map(map, &kva, size, uvm.kernel_object,
		    prefer, align, UVM_MAPFLAG(UVM_PROT_ALL,
		    UVM_PROT_ALL, UVM_INH_NONE, UVM_ADV_RANDOM, flags))
		    == 0)) {
			UVMHIST_LOG(maphist,"<- done (kva=0x%x)", kva,0,0,0);
			return (kva);
		}

		/*
		 * failed.  sleep for a while (on map)
		 */
		if ((flags & UVM_KMF_NOWAIT) != 0)
			return (0);

		UVMHIST_LOG(maphist,"<<<sleeping>>>",0,0,0,0);
		tsleep((caddr_t)map, PVM, "vallocwait", 0);
	}
	/*NOTREACHED*/
}

/* Function definitions for binary compatibility */
vaddr_t
uvm_km_kmemalloc(struct vm_map *map, struct uvm_object *obj,
    vsize_t sz, int flags)
{
	return uvm_km_kmemalloc1(map, obj, sz, 0, UVM_UNKNOWN_OFFSET, flags);
}

vaddr_t uvm_km_valloc(struct vm_map *map, vsize_t sz)
{
	return uvm_km_valloc1(map, sz, 0, UVM_UNKNOWN_OFFSET, UVM_KMF_NOWAIT);
}

vaddr_t uvm_km_valloc_align(struct vm_map *map, vsize_t sz, vsize_t align)
{
	return uvm_km_valloc1(map, sz, align, UVM_UNKNOWN_OFFSET, UVM_KMF_NOWAIT);
}

vaddr_t uvm_km_valloc_prefer_wait(struct vm_map *map, vsize_t sz, voff_t prefer)
{
	return uvm_km_valloc1(map, sz, 0, prefer, 0);
}

vaddr_t uvm_km_valloc_wait(struct vm_map *map, vsize_t sz)
{
	return uvm_km_valloc1(map, sz, 0, UVM_UNKNOWN_OFFSET, 0);
}
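
/*
 * illustrative sketch: the wrappers above differ only in their blocking
 * behavior and placement hint.  for example:
 *
 *	va = uvm_km_valloc(kernel_map, size);
 *		(UVM_KMF_NOWAIT: returns 0 right away if no VA is free)
 *	va = uvm_km_valloc_wait(kernel_map, size);
 *		(sleeps in uvm_km_valloc1() until VA becomes available)
 */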

/* Sanity; must specify both or none. */
#if (defined(PMAP_MAP_POOLPAGE) || defined(PMAP_UNMAP_POOLPAGE)) && \
    (!defined(PMAP_MAP_POOLPAGE) || !defined(PMAP_UNMAP_POOLPAGE))
#error Must specify MAP and UNMAP together.
#endif

/*
 * uvm_km_alloc_poolpage: allocate a page for the pool allocator
 *
 * => if the pmap specifies an alternate mapping method, we use it.
 */

/* ARGSUSED */
vaddr_t
uvm_km_alloc_poolpage1(map, obj, waitok)
	struct vm_map *map;
	struct uvm_object *obj;
	boolean_t waitok;
{
#if defined(PMAP_MAP_POOLPAGE)
	struct vm_page *pg;
	vaddr_t va;

 again:
	pg = uvm_pagealloc(NULL, 0, NULL, UVM_PGA_USERESERVE);
	if (__predict_false(pg == NULL)) {
		if (waitok) {
			uvm_wait("plpg");
			goto again;
		} else
			return (0);
	}
	va = PMAP_MAP_POOLPAGE(VM_PAGE_TO_PHYS(pg));
	if (__predict_false(va == 0))
		uvm_pagefree(pg);
	return (va);
#else
	vaddr_t va;
	int s;

	/*
	 * NOTE: We may be called with a map that doesn't require splvm
	 * protection (e.g. kernel_map).  However, it does not hurt to
	 * go to splvm in this case (since unprotected maps will never be
	 * accessed in interrupt context).
	 *
	 * XXX We may want to consider changing the interface to this
	 * XXX function.
	 */

	s = splvm();
	va = uvm_km_kmemalloc(map, obj, PAGE_SIZE,
	    waitok ? 0 : UVM_KMF_NOWAIT | UVM_KMF_TRYLOCK);
	splx(s);
	return (va);
#endif /* PMAP_MAP_POOLPAGE */
}

/*
 * uvm_km_free_poolpage: free a previously allocated pool page
 *
 * => if the pmap specifies an alternate unmapping method, we use it.
 */

/* ARGSUSED */
void
uvm_km_free_poolpage1(map, addr)
	struct vm_map *map;
	vaddr_t addr;
{
#if defined(PMAP_UNMAP_POOLPAGE)
	paddr_t pa;

	pa = PMAP_UNMAP_POOLPAGE(addr);
	uvm_pagefree(PHYS_TO_VM_PAGE(pa));
#else
	int s;

	/*
	 * NOTE: We may be called with a map that doesn't require splvm
	 * protection (e.g. kernel_map).  However, it does not hurt to
	 * go to splvm in this case (since unprotected maps will never be
	 * accessed in interrupt context).
	 *
	 * XXX We may want to consider changing the interface to this
	 * XXX function.
	 */

	s = splvm();
	uvm_km_free(map, addr, PAGE_SIZE);
	splx(s);
#endif /* PMAP_UNMAP_POOLPAGE */
}
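
/*
 * illustrative sketch: a pool(9) back-end page allocator built on the
 * pair above (a hedged sketch; "example_pool_page_alloc" is a
 * hypothetical name, and real back-ends add more bookkeeping):
 *
 *	void *
 *	example_pool_page_alloc(struct pool *pp, int flags)
 *	{
 *
 *		return ((void *)uvm_km_alloc_poolpage1(kmem_map, NULL,
 *		    (flags & PR_WAITOK) != 0));
 *	}
 */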