uvm_km.c revision 1.109
1/* $NetBSD: uvm_km.c,v 1.109 2011/06/12 03:36:03 rmind Exp $ */ 2 3/* 4 * Copyright (c) 1997 Charles D. Cranor and Washington University. 5 * Copyright (c) 1991, 1993, The Regents of the University of California. 6 * 7 * All rights reserved. 8 * 9 * This code is derived from software contributed to Berkeley by 10 * The Mach Operating System project at Carnegie-Mellon University. 11 * 12 * Redistribution and use in source and binary forms, with or without 13 * modification, are permitted provided that the following conditions 14 * are met: 15 * 1. Redistributions of source code must retain the above copyright 16 * notice, this list of conditions and the following disclaimer. 17 * 2. Redistributions in binary form must reproduce the above copyright 18 * notice, this list of conditions and the following disclaimer in the 19 * documentation and/or other materials provided with the distribution. 20 * 3. Neither the name of the University nor the names of its contributors 21 * may be used to endorse or promote products derived from this software 22 * without specific prior written permission. 23 * 24 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND 25 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 26 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 27 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE 28 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 29 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS 30 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 31 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT 32 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY 33 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 34 * SUCH DAMAGE. 35 * 36 * @(#)vm_kern.c 8.3 (Berkeley) 1/12/94 37 * from: Id: uvm_km.c,v 1.1.2.14 1998/02/06 05:19:27 chs Exp 38 * 39 * 40 * Copyright (c) 1987, 1990 Carnegie-Mellon University. 41 * All rights reserved. 42 * 43 * Permission to use, copy, modify and distribute this software and 44 * its documentation is hereby granted, provided that both the copyright 45 * notice and this permission notice appear in all copies of the 46 * software, derivative works or modified versions, and any portions 47 * thereof, and that both notices appear in supporting documentation. 48 * 49 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS" 50 * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND 51 * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE. 52 * 53 * Carnegie Mellon requests users of this software to return to 54 * 55 * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU 56 * School of Computer Science 57 * Carnegie Mellon University 58 * Pittsburgh PA 15213-3890 59 * 60 * any improvements or extensions that they make and grant Carnegie the 61 * rights to redistribute these changes. 62 */ 63 64/* 65 * uvm_km.c: handle kernel memory allocation and management 66 */ 67 68/* 69 * overview of kernel memory management: 70 * 71 * the kernel virtual address space is mapped by "kernel_map." kernel_map 72 * starts at VM_MIN_KERNEL_ADDRESS and goes to VM_MAX_KERNEL_ADDRESS. 73 * note that VM_MIN_KERNEL_ADDRESS is equal to vm_map_min(kernel_map). 74 * 75 * the kernel_map has several "submaps." submaps can only appear in 76 * the kernel_map (user processes can't use them). submaps "take over" 77 * the management of a sub-range of the kernel's address space. submaps 78 * are typically allocated at boot time and are never released. kernel 79 * virtual address space that is mapped by a submap is locked by the 80 * submap's lock -- not the kernel_map's lock. 81 * 82 * thus, the useful feature of submaps is that they allow us to break 83 * up the locking and protection of the kernel address space into smaller 84 * chunks. 85 * 86 * the vm system has several standard kernel submaps, including: 87 * kmem_map => contains only wired kernel memory for the kernel 88 * malloc. 89 * pager_map => used to map "buf" structures into kernel space 90 * exec_map => used during exec to handle exec args 91 * etc... 92 * 93 * the kernel allocates its private memory out of special uvm_objects whose 94 * reference count is set to UVM_OBJ_KERN (thus indicating that the objects 95 * are "special" and never die). all kernel objects should be thought of 96 * as large, fixed-sized, sparsely populated uvm_objects. each kernel 97 * object is equal to the size of kernel virtual address space (i.e. the 98 * value "VM_MAX_KERNEL_ADDRESS - VM_MIN_KERNEL_ADDRESS"). 99 * 100 * note that just because a kernel object spans the entire kernel virtual 101 * address space doesn't mean that it has to be mapped into the entire space. 102 * large chunks of a kernel object's space go unused either because 103 * that area of kernel VM is unmapped, or there is some other type of 104 * object mapped into that range (e.g. a vnode). for submap's kernel 105 * objects, the only part of the object that can ever be populated is the 106 * offsets that are managed by the submap. 107 * 108 * note that the "offset" in a kernel object is always the kernel virtual 109 * address minus the VM_MIN_KERNEL_ADDRESS (aka vm_map_min(kernel_map)). 110 * example: 111 * suppose VM_MIN_KERNEL_ADDRESS is 0xf8000000 and the kernel does a 112 * uvm_km_alloc(kernel_map, PAGE_SIZE) [allocate 1 wired down page in the 113 * kernel map]. if uvm_km_alloc returns virtual address 0xf8235000, 114 * then that means that the page at offset 0x235000 in kernel_object is 115 * mapped at 0xf8235000. 116 * 117 * kernel object have one other special property: when the kernel virtual 118 * memory mapping them is unmapped, the backing memory in the object is 119 * freed right away. this is done with the uvm_km_pgremove() function. 120 * this has to be done because there is no backing store for kernel pages 121 * and no need to save them after they are no longer referenced. 122 */ 123 124#include <sys/cdefs.h> 125__KERNEL_RCSID(0, "$NetBSD: uvm_km.c,v 1.109 2011/06/12 03:36:03 rmind Exp $"); 126 127#include "opt_uvmhist.h" 128 129#include <sys/param.h> 130#include <sys/malloc.h> 131#include <sys/systm.h> 132#include <sys/proc.h> 133#include <sys/pool.h> 134 135#include <uvm/uvm.h> 136 137/* 138 * global data structures 139 */ 140 141struct vm_map *kernel_map = NULL; 142 143/* 144 * local data structues 145 */ 146 147static struct vm_map_kernel kernel_map_store; 148static struct vm_map_entry kernel_first_mapent_store; 149 150#if !defined(PMAP_MAP_POOLPAGE) 151 152/* 153 * kva cache 154 * 155 * XXX maybe it's better to do this at the uvm_map layer. 156 */ 157 158#define KM_VACACHE_SIZE (32 * PAGE_SIZE) /* XXX tune */ 159 160static void *km_vacache_alloc(struct pool *, int); 161static void km_vacache_free(struct pool *, void *); 162static void km_vacache_init(struct vm_map *, const char *, size_t); 163 164/* XXX */ 165#define KM_VACACHE_POOL_TO_MAP(pp) \ 166 ((struct vm_map *)((char *)(pp) - \ 167 offsetof(struct vm_map_kernel, vmk_vacache))) 168 169static void * 170km_vacache_alloc(struct pool *pp, int flags) 171{ 172 vaddr_t va; 173 size_t size; 174 struct vm_map *map; 175 size = pp->pr_alloc->pa_pagesz; 176 177 map = KM_VACACHE_POOL_TO_MAP(pp); 178 179 va = vm_map_min(map); /* hint */ 180 if (uvm_map(map, &va, size, NULL, UVM_UNKNOWN_OFFSET, size, 181 UVM_MAPFLAG(UVM_PROT_ALL, UVM_PROT_ALL, UVM_INH_NONE, 182 UVM_ADV_RANDOM, UVM_FLAG_QUANTUM | 183 ((flags & PR_WAITOK) ? UVM_FLAG_WAITVA : 184 UVM_FLAG_TRYLOCK | UVM_FLAG_NOWAIT)))) 185 return NULL; 186 187 return (void *)va; 188} 189 190static void 191km_vacache_free(struct pool *pp, void *v) 192{ 193 vaddr_t va = (vaddr_t)v; 194 size_t size = pp->pr_alloc->pa_pagesz; 195 struct vm_map *map; 196 197 map = KM_VACACHE_POOL_TO_MAP(pp); 198 uvm_unmap1(map, va, va + size, UVM_FLAG_QUANTUM|UVM_FLAG_VAONLY); 199} 200 201/* 202 * km_vacache_init: initialize kva cache. 203 */ 204 205static void 206km_vacache_init(struct vm_map *map, const char *name, size_t size) 207{ 208 struct vm_map_kernel *vmk; 209 struct pool *pp; 210 struct pool_allocator *pa; 211 int ipl; 212 213 KASSERT(VM_MAP_IS_KERNEL(map)); 214 KASSERT(size < (vm_map_max(map) - vm_map_min(map)) / 2); /* sanity */ 215 216 217 vmk = vm_map_to_kernel(map); 218 pp = &vmk->vmk_vacache; 219 pa = &vmk->vmk_vacache_allocator; 220 memset(pa, 0, sizeof(*pa)); 221 pa->pa_alloc = km_vacache_alloc; 222 pa->pa_free = km_vacache_free; 223 pa->pa_pagesz = (unsigned int)size; 224 pa->pa_backingmap = map; 225 pa->pa_backingmapptr = NULL; 226 227 if ((map->flags & VM_MAP_INTRSAFE) != 0) 228 ipl = IPL_VM; 229 else 230 ipl = IPL_NONE; 231 232 pool_init(pp, PAGE_SIZE, 0, 0, PR_NOTOUCH | PR_RECURSIVE, name, pa, 233 ipl); 234} 235 236void 237uvm_km_vacache_init(struct vm_map *map, const char *name, size_t size) 238{ 239 240 map->flags |= VM_MAP_VACACHE; 241 if (size == 0) 242 size = KM_VACACHE_SIZE; 243 km_vacache_init(map, name, size); 244} 245 246#else /* !defined(PMAP_MAP_POOLPAGE) */ 247 248void 249uvm_km_vacache_init(struct vm_map *map, const char *name, size_t size) 250{ 251 252 /* nothing */ 253} 254 255#endif /* !defined(PMAP_MAP_POOLPAGE) */ 256 257void 258uvm_km_va_drain(struct vm_map *map, uvm_flag_t flags) 259{ 260 struct vm_map_kernel *vmk = vm_map_to_kernel(map); 261 262 callback_run_roundrobin(&vmk->vmk_reclaim_callback, NULL); 263} 264 265/* 266 * uvm_km_init: init kernel maps and objects to reflect reality (i.e. 267 * KVM already allocated for text, data, bss, and static data structures). 268 * 269 * => KVM is defined by VM_MIN_KERNEL_ADDRESS/VM_MAX_KERNEL_ADDRESS. 270 * we assume that [vmin -> start] has already been allocated and that 271 * "end" is the end. 272 */ 273 274void 275uvm_km_init(vaddr_t start, vaddr_t end) 276{ 277 vaddr_t base = VM_MIN_KERNEL_ADDRESS; 278 279 /* 280 * next, init kernel memory objects. 281 */ 282 283 /* kernel_object: for pageable anonymous kernel memory */ 284 uao_init(); 285 uvm_kernel_object = uao_create(VM_MAX_KERNEL_ADDRESS - 286 VM_MIN_KERNEL_ADDRESS, UAO_FLAG_KERNOBJ); 287 288 /* 289 * init the map and reserve any space that might already 290 * have been allocated kernel space before installing. 291 */ 292 293 uvm_map_setup_kernel(&kernel_map_store, base, end, VM_MAP_PAGEABLE); 294 kernel_map_store.vmk_map.pmap = pmap_kernel(); 295 if (start != base) { 296 int error; 297 struct uvm_map_args args; 298 299 error = uvm_map_prepare(&kernel_map_store.vmk_map, 300 base, start - base, 301 NULL, UVM_UNKNOWN_OFFSET, 0, 302 UVM_MAPFLAG(UVM_PROT_ALL, UVM_PROT_ALL, UVM_INH_NONE, 303 UVM_ADV_RANDOM, UVM_FLAG_FIXED), &args); 304 if (!error) { 305 kernel_first_mapent_store.flags = 306 UVM_MAP_KERNEL | UVM_MAP_FIRST; 307 error = uvm_map_enter(&kernel_map_store.vmk_map, &args, 308 &kernel_first_mapent_store); 309 } 310 311 if (error) 312 panic( 313 "uvm_km_init: could not reserve space for kernel"); 314 } 315 316 /* 317 * install! 318 */ 319 320 kernel_map = &kernel_map_store.vmk_map; 321 uvm_km_vacache_init(kernel_map, "kvakernel", 0); 322} 323 324/* 325 * uvm_km_suballoc: allocate a submap in the kernel map. once a submap 326 * is allocated all references to that area of VM must go through it. this 327 * allows the locking of VAs in kernel_map to be broken up into regions. 328 * 329 * => if `fixed' is true, *vmin specifies where the region described 330 * by the submap must start 331 * => if submap is non NULL we use that as the submap, otherwise we 332 * alloc a new map 333 */ 334 335struct vm_map * 336uvm_km_suballoc(struct vm_map *map, vaddr_t *vmin /* IN/OUT */, 337 vaddr_t *vmax /* OUT */, vsize_t size, int flags, bool fixed, 338 struct vm_map_kernel *submap) 339{ 340 int mapflags = UVM_FLAG_NOMERGE | (fixed ? UVM_FLAG_FIXED : 0); 341 342 KASSERT(vm_map_pmap(map) == pmap_kernel()); 343 344 size = round_page(size); /* round up to pagesize */ 345 size += uvm_mapent_overhead(size, flags); 346 347 /* 348 * first allocate a blank spot in the parent map 349 */ 350 351 if (uvm_map(map, vmin, size, NULL, UVM_UNKNOWN_OFFSET, 0, 352 UVM_MAPFLAG(UVM_PROT_ALL, UVM_PROT_ALL, UVM_INH_NONE, 353 UVM_ADV_RANDOM, mapflags)) != 0) { 354 panic("uvm_km_suballoc: unable to allocate space in parent map"); 355 } 356 357 /* 358 * set VM bounds (vmin is filled in by uvm_map) 359 */ 360 361 *vmax = *vmin + size; 362 363 /* 364 * add references to pmap and create or init the submap 365 */ 366 367 pmap_reference(vm_map_pmap(map)); 368 if (submap == NULL) { 369 submap = malloc(sizeof(*submap), M_VMMAP, M_WAITOK); 370 if (submap == NULL) 371 panic("uvm_km_suballoc: unable to create submap"); 372 } 373 uvm_map_setup_kernel(submap, *vmin, *vmax, flags); 374 submap->vmk_map.pmap = vm_map_pmap(map); 375 376 /* 377 * now let uvm_map_submap plug in it... 378 */ 379 380 if (uvm_map_submap(map, *vmin, *vmax, &submap->vmk_map) != 0) 381 panic("uvm_km_suballoc: submap allocation failed"); 382 383 return(&submap->vmk_map); 384} 385 386/* 387 * uvm_km_pgremove: remove pages from a kernel uvm_object. 388 * 389 * => when you unmap a part of anonymous kernel memory you want to toss 390 * the pages right away. (this gets called from uvm_unmap_...). 391 */ 392 393void 394uvm_km_pgremove(vaddr_t startva, vaddr_t endva) 395{ 396 struct uvm_object * const uobj = uvm_kernel_object; 397 const voff_t start = startva - vm_map_min(kernel_map); 398 const voff_t end = endva - vm_map_min(kernel_map); 399 struct vm_page *pg; 400 voff_t curoff, nextoff; 401 int swpgonlydelta = 0; 402 UVMHIST_FUNC("uvm_km_pgremove"); UVMHIST_CALLED(maphist); 403 404 KASSERT(VM_MIN_KERNEL_ADDRESS <= startva); 405 KASSERT(startva < endva); 406 KASSERT(endva <= VM_MAX_KERNEL_ADDRESS); 407 408 mutex_enter(uobj->vmobjlock); 409 410 for (curoff = start; curoff < end; curoff = nextoff) { 411 nextoff = curoff + PAGE_SIZE; 412 pg = uvm_pagelookup(uobj, curoff); 413 if (pg != NULL && pg->flags & PG_BUSY) { 414 pg->flags |= PG_WANTED; 415 UVM_UNLOCK_AND_WAIT(pg, uobj->vmobjlock, 0, 416 "km_pgrm", 0); 417 mutex_enter(uobj->vmobjlock); 418 nextoff = curoff; 419 continue; 420 } 421 422 /* 423 * free the swap slot, then the page. 424 */ 425 426 if (pg == NULL && 427 uao_find_swslot(uobj, curoff >> PAGE_SHIFT) > 0) { 428 swpgonlydelta++; 429 } 430 uao_dropswap(uobj, curoff >> PAGE_SHIFT); 431 if (pg != NULL) { 432 mutex_enter(&uvm_pageqlock); 433 uvm_pagefree(pg); 434 mutex_exit(&uvm_pageqlock); 435 } 436 } 437 mutex_exit(uobj->vmobjlock); 438 439 if (swpgonlydelta > 0) { 440 mutex_enter(&uvm_swap_data_lock); 441 KASSERT(uvmexp.swpgonly >= swpgonlydelta); 442 uvmexp.swpgonly -= swpgonlydelta; 443 mutex_exit(&uvm_swap_data_lock); 444 } 445} 446 447 448/* 449 * uvm_km_pgremove_intrsafe: like uvm_km_pgremove(), but for non object backed 450 * regions. 451 * 452 * => when you unmap a part of anonymous kernel memory you want to toss 453 * the pages right away. (this is called from uvm_unmap_...). 454 * => none of the pages will ever be busy, and none of them will ever 455 * be on the active or inactive queues (because they have no object). 456 */ 457 458void 459uvm_km_pgremove_intrsafe(struct vm_map *map, vaddr_t start, vaddr_t end) 460{ 461 struct vm_page *pg; 462 paddr_t pa; 463 UVMHIST_FUNC("uvm_km_pgremove_intrsafe"); UVMHIST_CALLED(maphist); 464 465 KASSERT(VM_MAP_IS_KERNEL(map)); 466 KASSERT(vm_map_min(map) <= start); 467 KASSERT(start < end); 468 KASSERT(end <= vm_map_max(map)); 469 470 for (; start < end; start += PAGE_SIZE) { 471 if (!pmap_extract(pmap_kernel(), start, &pa)) { 472 continue; 473 } 474 pg = PHYS_TO_VM_PAGE(pa); 475 KASSERT(pg); 476 KASSERT(pg->uobject == NULL && pg->uanon == NULL); 477 uvm_pagefree(pg); 478 } 479} 480 481#if defined(DEBUG) 482void 483uvm_km_check_empty(struct vm_map *map, vaddr_t start, vaddr_t end) 484{ 485 struct vm_page *pg; 486 vaddr_t va; 487 paddr_t pa; 488 489 KDASSERT(VM_MAP_IS_KERNEL(map)); 490 KDASSERT(vm_map_min(map) <= start); 491 KDASSERT(start < end); 492 KDASSERT(end <= vm_map_max(map)); 493 494 for (va = start; va < end; va += PAGE_SIZE) { 495 if (pmap_extract(pmap_kernel(), va, &pa)) { 496 panic("uvm_km_check_empty: va %p has pa 0x%llx", 497 (void *)va, (long long)pa); 498 } 499 if ((map->flags & VM_MAP_INTRSAFE) == 0) { 500 mutex_enter(uvm_kernel_object->vmobjlock); 501 pg = uvm_pagelookup(uvm_kernel_object, 502 va - vm_map_min(kernel_map)); 503 mutex_exit(uvm_kernel_object->vmobjlock); 504 if (pg) { 505 panic("uvm_km_check_empty: " 506 "has page hashed at %p", (const void *)va); 507 } 508 } 509 } 510} 511#endif /* defined(DEBUG) */ 512 513/* 514 * uvm_km_alloc: allocate an area of kernel memory. 515 * 516 * => NOTE: we can return 0 even if we can wait if there is not enough 517 * free VM space in the map... caller should be prepared to handle 518 * this case. 519 * => we return KVA of memory allocated 520 */ 521 522vaddr_t 523uvm_km_alloc(struct vm_map *map, vsize_t size, vsize_t align, uvm_flag_t flags) 524{ 525 vaddr_t kva, loopva; 526 vaddr_t offset; 527 vsize_t loopsize; 528 struct vm_page *pg; 529 struct uvm_object *obj; 530 int pgaflags; 531 vm_prot_t prot; 532 UVMHIST_FUNC(__func__); UVMHIST_CALLED(maphist); 533 534 KASSERT(vm_map_pmap(map) == pmap_kernel()); 535 KASSERT((flags & UVM_KMF_TYPEMASK) == UVM_KMF_WIRED || 536 (flags & UVM_KMF_TYPEMASK) == UVM_KMF_PAGEABLE || 537 (flags & UVM_KMF_TYPEMASK) == UVM_KMF_VAONLY); 538 539 /* 540 * setup for call 541 */ 542 543 kva = vm_map_min(map); /* hint */ 544 size = round_page(size); 545 obj = (flags & UVM_KMF_PAGEABLE) ? uvm_kernel_object : NULL; 546 UVMHIST_LOG(maphist," (map=0x%x, obj=0x%x, size=0x%x, flags=%d)", 547 map, obj, size, flags); 548 549 /* 550 * allocate some virtual space 551 */ 552 553 if (__predict_false(uvm_map(map, &kva, size, obj, UVM_UNKNOWN_OFFSET, 554 align, UVM_MAPFLAG(UVM_PROT_ALL, UVM_PROT_ALL, UVM_INH_NONE, 555 UVM_ADV_RANDOM, 556 (flags & (UVM_KMF_TRYLOCK | UVM_KMF_NOWAIT | UVM_KMF_WAITVA)) 557 | UVM_FLAG_QUANTUM)) != 0)) { 558 UVMHIST_LOG(maphist, "<- done (no VM)",0,0,0,0); 559 return(0); 560 } 561 562 /* 563 * if all we wanted was VA, return now 564 */ 565 566 if (flags & (UVM_KMF_VAONLY | UVM_KMF_PAGEABLE)) { 567 UVMHIST_LOG(maphist,"<- done valloc (kva=0x%x)", kva,0,0,0); 568 return(kva); 569 } 570 571 /* 572 * recover object offset from virtual address 573 */ 574 575 offset = kva - vm_map_min(kernel_map); 576 UVMHIST_LOG(maphist, " kva=0x%x, offset=0x%x", kva, offset,0,0); 577 578 /* 579 * now allocate and map in the memory... note that we are the only ones 580 * whom should ever get a handle on this area of VM. 581 */ 582 583 loopva = kva; 584 loopsize = size; 585 586 pgaflags = UVM_FLAG_COLORMATCH; 587 if (flags & UVM_KMF_NOWAIT) 588 pgaflags |= UVM_PGA_USERESERVE; 589 if (flags & UVM_KMF_ZERO) 590 pgaflags |= UVM_PGA_ZERO; 591 prot = VM_PROT_READ | VM_PROT_WRITE; 592 if (flags & UVM_KMF_EXEC) 593 prot |= VM_PROT_EXECUTE; 594 while (loopsize) { 595 KASSERT(!pmap_extract(pmap_kernel(), loopva, NULL)); 596 597 pg = uvm_pagealloc_strat(NULL, offset, NULL, pgaflags, 598#ifdef UVM_KM_VMFREELIST 599 UVM_PGA_STRAT_ONLY, UVM_KM_VMFREELIST 600#else 601 UVM_PGA_STRAT_NORMAL, 0 602#endif 603 ); 604 605 /* 606 * out of memory? 607 */ 608 609 if (__predict_false(pg == NULL)) { 610 if ((flags & UVM_KMF_NOWAIT) || 611 ((flags & UVM_KMF_CANFAIL) && !uvm_reclaimable())) { 612 /* free everything! */ 613 uvm_km_free(map, kva, size, 614 flags & UVM_KMF_TYPEMASK); 615 return (0); 616 } else { 617 uvm_wait("km_getwait2"); /* sleep here */ 618 continue; 619 } 620 } 621 622 pg->flags &= ~PG_BUSY; /* new page */ 623 UVM_PAGE_OWN(pg, NULL); 624 625 /* 626 * map it in 627 */ 628 629 pmap_kenter_pa(loopva, VM_PAGE_TO_PHYS(pg), 630 prot, PMAP_KMPAGE); 631 loopva += PAGE_SIZE; 632 offset += PAGE_SIZE; 633 loopsize -= PAGE_SIZE; 634 } 635 636 pmap_update(pmap_kernel()); 637 638 UVMHIST_LOG(maphist,"<- done (kva=0x%x)", kva,0,0,0); 639 return(kva); 640} 641 642/* 643 * uvm_km_free: free an area of kernel memory 644 */ 645 646void 647uvm_km_free(struct vm_map *map, vaddr_t addr, vsize_t size, uvm_flag_t flags) 648{ 649 650 KASSERT((flags & UVM_KMF_TYPEMASK) == UVM_KMF_WIRED || 651 (flags & UVM_KMF_TYPEMASK) == UVM_KMF_PAGEABLE || 652 (flags & UVM_KMF_TYPEMASK) == UVM_KMF_VAONLY); 653 KASSERT((addr & PAGE_MASK) == 0); 654 KASSERT(vm_map_pmap(map) == pmap_kernel()); 655 656 size = round_page(size); 657 658 659 if (flags & UVM_KMF_PAGEABLE) { 660 /* 661 * No need to lock for pmap, since the kernel is always 662 * self-consistent. The pages cannot be in use elsewhere. 663 */ 664 uvm_km_pgremove(addr, addr + size); 665 pmap_remove(pmap_kernel(), addr, addr + size); 666 667 } else if (flags & UVM_KMF_WIRED) { 668 /* 669 * Note: uvm_km_pgremove_intrsafe() extracts mapping, thus 670 * remove it after. See comment below about KVA visibility. 671 */ 672 uvm_km_pgremove_intrsafe(map, addr, addr + size); 673 pmap_kremove(addr, size); 674 } 675 676 /* 677 * Note: uvm_unmap_remove() calls pmap_update() for us, before 678 * KVA becomes globally available. 679 */ 680 681 uvm_unmap1(map, addr, addr + size, UVM_FLAG_QUANTUM|UVM_FLAG_VAONLY); 682} 683 684/* Sanity; must specify both or none. */ 685#if (defined(PMAP_MAP_POOLPAGE) || defined(PMAP_UNMAP_POOLPAGE)) && \ 686 (!defined(PMAP_MAP_POOLPAGE) || !defined(PMAP_UNMAP_POOLPAGE)) 687#error Must specify MAP and UNMAP together. 688#endif 689 690/* 691 * uvm_km_alloc_poolpage: allocate a page for the pool allocator 692 * 693 * => if the pmap specifies an alternate mapping method, we use it. 694 */ 695 696/* ARGSUSED */ 697vaddr_t 698uvm_km_alloc_poolpage_cache(struct vm_map *map, bool waitok) 699{ 700#if defined(PMAP_MAP_POOLPAGE) 701 return uvm_km_alloc_poolpage(map, waitok); 702#else 703 struct vm_page *pg; 704 struct pool *pp = &vm_map_to_kernel(map)->vmk_vacache; 705 vaddr_t va; 706 707 if ((map->flags & VM_MAP_VACACHE) == 0) 708 return uvm_km_alloc_poolpage(map, waitok); 709 710 va = (vaddr_t)pool_get(pp, waitok ? PR_WAITOK : PR_NOWAIT); 711 if (va == 0) 712 return 0; 713 KASSERT(!pmap_extract(pmap_kernel(), va, NULL)); 714again: 715 pg = uvm_pagealloc(NULL, 0, NULL, waitok ? 0 : UVM_PGA_USERESERVE); 716 if (__predict_false(pg == NULL)) { 717 if (waitok) { 718 uvm_wait("plpg"); 719 goto again; 720 } else { 721 pool_put(pp, (void *)va); 722 return 0; 723 } 724 } 725 pmap_kenter_pa(va, VM_PAGE_TO_PHYS(pg), 726 VM_PROT_READ|VM_PROT_WRITE, PMAP_KMPAGE); 727 pmap_update(pmap_kernel()); 728 729 return va; 730#endif /* PMAP_MAP_POOLPAGE */ 731} 732 733vaddr_t 734uvm_km_alloc_poolpage(struct vm_map *map, bool waitok) 735{ 736#if defined(PMAP_MAP_POOLPAGE) 737 struct vm_page *pg; 738 vaddr_t va; 739 740 741 again: 742#ifdef PMAP_ALLOC_POOLPAGE 743 pg = PMAP_ALLOC_POOLPAGE(waitok ? 0 : UVM_PGA_USERESERVE); 744#else 745 pg = uvm_pagealloc(NULL, 0, NULL, waitok ? 0 : UVM_PGA_USERESERVE); 746#endif 747 if (__predict_false(pg == NULL)) { 748 if (waitok) { 749 uvm_wait("plpg"); 750 goto again; 751 } else 752 return (0); 753 } 754 va = PMAP_MAP_POOLPAGE(VM_PAGE_TO_PHYS(pg)); 755 if (__predict_false(va == 0)) 756 uvm_pagefree(pg); 757 return (va); 758#else 759 vaddr_t va; 760 761 va = uvm_km_alloc(map, PAGE_SIZE, 0, 762 (waitok ? 0 : UVM_KMF_NOWAIT | UVM_KMF_TRYLOCK) | UVM_KMF_WIRED); 763 return (va); 764#endif /* PMAP_MAP_POOLPAGE */ 765} 766 767/* 768 * uvm_km_free_poolpage: free a previously allocated pool page 769 * 770 * => if the pmap specifies an alternate unmapping method, we use it. 771 */ 772 773/* ARGSUSED */ 774void 775uvm_km_free_poolpage_cache(struct vm_map *map, vaddr_t addr) 776{ 777#if defined(PMAP_UNMAP_POOLPAGE) 778 uvm_km_free_poolpage(map, addr); 779#else 780 struct pool *pp; 781 782 if ((map->flags & VM_MAP_VACACHE) == 0) { 783 uvm_km_free_poolpage(map, addr); 784 return; 785 } 786 787 KASSERT(pmap_extract(pmap_kernel(), addr, NULL)); 788 uvm_km_pgremove_intrsafe(map, addr, addr + PAGE_SIZE); 789 pmap_kremove(addr, PAGE_SIZE); 790#if defined(DEBUG) 791 pmap_update(pmap_kernel()); 792#endif 793 KASSERT(!pmap_extract(pmap_kernel(), addr, NULL)); 794 pp = &vm_map_to_kernel(map)->vmk_vacache; 795 pool_put(pp, (void *)addr); 796#endif 797} 798 799/* ARGSUSED */ 800void 801uvm_km_free_poolpage(struct vm_map *map, vaddr_t addr) 802{ 803#if defined(PMAP_UNMAP_POOLPAGE) 804 paddr_t pa; 805 806 pa = PMAP_UNMAP_POOLPAGE(addr); 807 uvm_pagefree(PHYS_TO_VM_PAGE(pa)); 808#else 809 uvm_km_free(map, addr, PAGE_SIZE, UVM_KMF_WIRED); 810#endif /* PMAP_UNMAP_POOLPAGE */ 811} 812