/*	$NetBSD: uvm_km.c,v 1.84 2005/12/11 12:25:29 christos Exp $	*/

/*
 * Copyright (c) 1997 Charles D. Cranor and Washington University.
 * Copyright (c) 1991, 1993, The Regents of the University of California.
 *
 * All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * The Mach Operating System project at Carnegie-Mellon University.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *      This product includes software developed by Charles D. Cranor,
 *      Washington University, the University of California, Berkeley and
 *      its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)vm_kern.c   8.3 (Berkeley) 1/12/94
 * from: Id: uvm_km.c,v 1.1.2.14 1998/02/06 05:19:27 chs Exp
 *
 *
 * Copyright (c) 1987, 1990 Carnegie-Mellon University.
 * All rights reserved.
 *
 * Permission to use, copy, modify and distribute this software and
 * its documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND
 * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie the
 * rights to redistribute these changes.
 */

/*
 * uvm_km.c: handle kernel memory allocation and management
 */

/*
 * overview of kernel memory management:
 *
 * the kernel virtual address space is mapped by "kernel_map."
 * kernel_map starts at VM_MIN_KERNEL_ADDRESS and goes to
 * VM_MAX_KERNEL_ADDRESS.   note that VM_MIN_KERNEL_ADDRESS is equal to
 * vm_map_min(kernel_map).
 *
 * the kernel_map has several "submaps."   submaps can only appear in
 * the kernel_map (user processes can't use them).   submaps "take over"
 * the management of a sub-range of the kernel's address space.  submaps
 * are typically allocated at boot time and are never released.   kernel
 * virtual address space that is mapped by a submap is locked by the
 * submap's lock -- not the kernel_map's lock.
 *
 * thus, the useful feature of submaps is that they allow us to break
 * up the locking and protection of the kernel address space into smaller
 * chunks.
 *
 * the vm system has several standard kernel submaps, including:
 *   kmem_map => contains only wired kernel memory for the kernel
 *		malloc.   *** access to kmem_map must be protected
 *		by splvm() because we are allowed to call malloc()
 *		at interrupt time ***
 *   mb_map => memory for large mbufs,  *** protected by splvm ***
 *   pager_map => used to map "buf" structures into kernel space
 *   exec_map => used during exec to handle exec args
 *   etc...
 *
 * the kernel allocates its private memory out of special uvm_objects whose
 * reference count is set to UVM_OBJ_KERN (thus indicating that the objects
 * are "special" and never die).   all kernel objects should be thought of
 * as large, fixed-sized, sparsely populated uvm_objects.   each kernel
 * object is equal to the size of kernel virtual address space (i.e. the
 * value "VM_MAX_KERNEL_ADDRESS - VM_MIN_KERNEL_ADDRESS").
 *
 * note that just because a kernel object spans the entire kernel virtual
 * address space doesn't mean that it has to be mapped into the entire space.
 * large chunks of a kernel object's space go unused either because
 * that area of kernel VM is unmapped, or there is some other type of
 * object mapped into that range (e.g. a vnode).    for submaps' kernel
 * objects, the only part of the object that can ever be populated is the
 * offsets that are managed by the submap.
 *
 * note that the "offset" in a kernel object is always the kernel virtual
 * address minus the VM_MIN_KERNEL_ADDRESS (aka vm_map_min(kernel_map)).
 * example:
 *   suppose VM_MIN_KERNEL_ADDRESS is 0xf8000000 and the kernel does a
 *   uvm_km_alloc(kernel_map, PAGE_SIZE) [allocate 1 wired down page in the
 *   kernel map].    if uvm_km_alloc returns virtual address 0xf8235000,
 *   then that means that the page at offset 0x235000 in kernel_object is
 *   mapped at 0xf8235000.
 *
 * kernel objects have one other special property: when the kernel virtual
 * memory mapping them is unmapped, the backing memory in the object is
 * freed right away.   this is done with the uvm_km_pgremove() function.
 * this has to be done because there is no backing store for kernel pages
 * and no need to save them after they are no longer referenced.
 */
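
/*
 * a minimal sketch of the offset arithmetic described above (illustrative
 * only; the helper name "kva_to_kernel_object_offset" is hypothetical and
 * not part of this file):
 *
 *	static voff_t
 *	kva_to_kernel_object_offset(vaddr_t kva)
 *	{
 *
 *		KASSERT(kva >= vm_map_min(kernel_map));
 *		return (voff_t)(kva - vm_map_min(kernel_map));
 *	}
 *
 * e.g. with VM_MIN_KERNEL_ADDRESS == 0xf8000000, a KVA of 0xf8235000
 * yields offset 0x235000 into kernel_object, matching the example above.
 */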

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: uvm_km.c,v 1.84 2005/12/11 12:25:29 christos Exp $");

#include "opt_uvmhist.h"

#include <sys/param.h>
#include <sys/malloc.h>
#include <sys/systm.h>
#include <sys/proc.h>
#include <sys/pool.h>

#include <uvm/uvm.h>

/*
 * global data structures
 */

struct vm_map *kernel_map = NULL;

/*
 * local data structures
 */

static struct vm_map_kernel kernel_map_store;
static struct vm_map_entry kernel_first_mapent_store;

#if !defined(PMAP_MAP_POOLPAGE)

/*
 * kva cache
 *
 * XXX maybe it's better to do this at the uvm_map layer.
 */

#define	KM_VACACHE_SIZE	(32 * PAGE_SIZE) /* XXX tune */

static void *km_vacache_alloc(struct pool *, int);
static void km_vacache_free(struct pool *, void *);
static void km_vacache_init(struct vm_map *, const char *, size_t);

/* XXX: recover the owning map from the pool embedded in vm_map_kernel */
#define	KM_VACACHE_POOL_TO_MAP(pp) \
	((struct vm_map *)((char *)(pp) - \
	    offsetof(struct vm_map_kernel, vmk_vacache)))

static void *
km_vacache_alloc(struct pool *pp, int flags)
{
	vaddr_t va;
	size_t size;
	struct vm_map *map;
	size = pp->pr_alloc->pa_pagesz;

	map = KM_VACACHE_POOL_TO_MAP(pp);

	va = vm_map_min(map); /* hint */
	if (uvm_map(map, &va, size, NULL, UVM_UNKNOWN_OFFSET, size,
	    UVM_MAPFLAG(UVM_PROT_ALL, UVM_PROT_ALL, UVM_INH_NONE,
	    UVM_ADV_RANDOM, UVM_FLAG_QUANTUM |
	    ((flags & PR_WAITOK) ? 0 : UVM_FLAG_TRYLOCK | UVM_FLAG_NOWAIT))))
		return NULL;

	return (void *)va;
}

static void
km_vacache_free(struct pool *pp, void *v)
{
	vaddr_t va = (vaddr_t)v;
	size_t size = pp->pr_alloc->pa_pagesz;
	struct vm_map *map;

	map = KM_VACACHE_POOL_TO_MAP(pp);
	uvm_unmap1(map, va, va + size, UVM_FLAG_QUANTUM|UVM_FLAG_VAONLY);
}

/*
 * km_vacache_init: initialize kva cache.
 */

static void
km_vacache_init(struct vm_map *map, const char *name, size_t size)
{
	struct vm_map_kernel *vmk;
	struct pool *pp;
	struct pool_allocator *pa;

	KASSERT(VM_MAP_IS_KERNEL(map));
	KASSERT(size < (vm_map_max(map) - vm_map_min(map)) / 2); /* sanity */

	vmk = vm_map_to_kernel(map);
	pp = &vmk->vmk_vacache;
	pa = &vmk->vmk_vacache_allocator;
	memset(pa, 0, sizeof(*pa));
	pa->pa_alloc = km_vacache_alloc;
	pa->pa_free = km_vacache_free;
	pa->pa_pagesz = (unsigned int)size;
	pool_init(pp, PAGE_SIZE, 0, 0, PR_NOTOUCH | PR_RECURSIVE, name, pa);

	/* XXX for now.. */
	pool_sethiwat(pp, 0);
}

void
uvm_km_vacache_init(struct vm_map *map, const char *name, size_t size)
{

	map->flags |= VM_MAP_VACACHE;
	if (size == 0)
		size = KM_VACACHE_SIZE;
	km_vacache_init(map, name, size);
}

#else /* !defined(PMAP_MAP_POOLPAGE) */

void
uvm_km_vacache_init(struct vm_map *map, const char *name, size_t size)
{

	/* nothing */
}

#endif /* !defined(PMAP_MAP_POOLPAGE) */
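
/*
 * KM_VACACHE_POOL_TO_MAP above is an instance of the usual "container of"
 * pattern: given a pointer to a member (the pool), subtract the member's
 * offset to recover the enclosing structure.  a minimal sketch of the same
 * idea (illustrative only; "struct outer" and "member_to_outer" are
 * hypothetical names, not part of this file):
 *
 *	struct outer {
 *		int o_x;
 *		struct pool o_pool;
 *	};
 *
 *	static struct outer *
 *	member_to_outer(struct pool *pp)
 *	{
 *
 *		return (struct outer *)((char *)pp -
 *		    offsetof(struct outer, o_pool));
 *	}
 *
 * this works here because the vacache pool is embedded in
 * struct vm_map_kernel, so the pool pointer passed to the pool_allocator
 * hooks uniquely identifies the kernel map it serves.
 */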

/*
 * uvm_km_init: init kernel maps and objects to reflect reality (i.e.
 * KVM already allocated for text, data, bss, and static data structures).
 *
 * => KVM is defined by VM_MIN_KERNEL_ADDRESS/VM_MAX_KERNEL_ADDRESS.
 *    we assume that [vmin -> start] has already been allocated and that
 *    "end" is the end.
 */

void
uvm_km_init(vaddr_t start, vaddr_t end)
{
	vaddr_t base = VM_MIN_KERNEL_ADDRESS;

	/*
	 * next, init kernel memory objects.
	 */

	/* kernel_object: for pageable anonymous kernel memory */
	uao_init();
	uvm.kernel_object = uao_create(VM_MAX_KERNEL_ADDRESS -
	    VM_MIN_KERNEL_ADDRESS, UAO_FLAG_KERNOBJ);

	/*
	 * init the map and reserve any space that might already
	 * have been allocated kernel space before installing.
	 */

	uvm_map_setup_kernel(&kernel_map_store, base, end, VM_MAP_PAGEABLE);
	kernel_map_store.vmk_map.pmap = pmap_kernel();
	if (start != base) {
		int error;
		struct uvm_map_args args;

		error = uvm_map_prepare(&kernel_map_store.vmk_map,
		    base, start - base,
		    NULL, UVM_UNKNOWN_OFFSET, 0,
		    UVM_MAPFLAG(UVM_PROT_ALL, UVM_PROT_ALL, UVM_INH_NONE,
		    UVM_ADV_RANDOM, UVM_FLAG_FIXED), &args);
		if (!error) {
			kernel_first_mapent_store.flags =
			    UVM_MAP_KERNEL | UVM_MAP_FIRST;
			error = uvm_map_enter(&kernel_map_store.vmk_map, &args,
			    &kernel_first_mapent_store);
		}

		if (error)
			panic(
			    "uvm_km_init: could not reserve space for kernel");
	}

	/*
	 * install!
	 */

	kernel_map = &kernel_map_store.vmk_map;
	uvm_km_vacache_init(kernel_map, "kvakernel", 0);
}

/*
 * uvm_km_suballoc: allocate a submap in the kernel map.   once a submap
 * is allocated all references to that area of VM must go through it.  this
 * allows the locking of VAs in kernel_map to be broken up into regions.
 *
 * => if `fixed' is true, *vmin specifies where the region described
 *    by the submap must start
 * => if submap is non NULL we use that as the submap, otherwise we
 *    alloc a new map
 */

struct vm_map *
uvm_km_suballoc(struct vm_map *map, vaddr_t *vmin /* IN/OUT */,
    vaddr_t *vmax /* OUT */, vsize_t size, int flags, boolean_t fixed,
    struct vm_map_kernel *submap)
{
	int mapflags = UVM_FLAG_NOMERGE | (fixed ? UVM_FLAG_FIXED : 0);

	KASSERT(vm_map_pmap(map) == pmap_kernel());

	size = round_page(size);	/* round up to pagesize */

	/*
	 * first allocate a blank spot in the parent map
	 */

	if (uvm_map(map, vmin, size, NULL, UVM_UNKNOWN_OFFSET, 0,
	    UVM_MAPFLAG(UVM_PROT_ALL, UVM_PROT_ALL, UVM_INH_NONE,
	    UVM_ADV_RANDOM, mapflags)) != 0) {
		panic("uvm_km_suballoc: unable to allocate space in parent map");
	}

	/*
	 * set VM bounds (vmin is filled in by uvm_map)
	 */

	*vmax = *vmin + size;

	/*
	 * add references to pmap and create or init the submap
	 */

	pmap_reference(vm_map_pmap(map));
	if (submap == NULL) {
		submap = malloc(sizeof(*submap), M_VMMAP, M_WAITOK);
		if (submap == NULL)
			panic("uvm_km_suballoc: unable to create submap");
	}
	uvm_map_setup_kernel(submap, *vmin, *vmax, flags);
	submap->vmk_map.pmap = vm_map_pmap(map);

	/*
	 * now let uvm_map_submap plug it in...
	 */

	if (uvm_map_submap(map, *vmin, *vmax, &submap->vmk_map) != 0)
		panic("uvm_km_suballoc: submap allocation failed");

	return(&submap->vmk_map);
}
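
/*
 * typical boot-time use, sketched (illustrative only; the size expression
 * and the "kmem_map_store" name are assumptions, not taken from this file):
 *
 *	static struct vm_map_kernel kmem_map_store;
 *	struct vm_map *kmem_map;
 *	vaddr_t kmb = 0, kml;
 *
 *	kmem_map = uvm_km_suballoc(kernel_map, &kmb, &kml,
 *	    nkmempages << PAGE_SHIFT, VM_MAP_INTRSAFE,
 *	    FALSE, &kmem_map_store);
 *
 * with `fixed' FALSE, uvm_map() picks the region and returns its start in
 * *vmin (kmb here); the submap then owns [kmb, kml) under its own lock.
 */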

/*
 * uvm_km_pgremove: remove pages from a kernel uvm_object.
 *
 * => when you unmap a part of anonymous kernel memory you want to toss
 *    the pages right away.    (this gets called from uvm_unmap_...).
 */

void
uvm_km_pgremove(vaddr_t startva, vaddr_t endva)
{
	struct uvm_object * const uobj = uvm.kernel_object;
	const voff_t start = startva - vm_map_min(kernel_map);
	const voff_t end = endva - vm_map_min(kernel_map);
	struct vm_page *pg;
	voff_t curoff, nextoff;
	int swpgonlydelta = 0;
	UVMHIST_FUNC("uvm_km_pgremove"); UVMHIST_CALLED(maphist);

	KASSERT(VM_MIN_KERNEL_ADDRESS <= startva);
	KASSERT(startva < endva);
	KASSERT(endva < VM_MAX_KERNEL_ADDRESS);

	simple_lock(&uobj->vmobjlock);

	for (curoff = start; curoff < end; curoff = nextoff) {
		nextoff = curoff + PAGE_SIZE;
		pg = uvm_pagelookup(uobj, curoff);
		if (pg != NULL && pg->flags & PG_BUSY) {
			pg->flags |= PG_WANTED;
			UVM_UNLOCK_AND_WAIT(pg, &uobj->vmobjlock, 0,
			    "km_pgrm", 0);
			simple_lock(&uobj->vmobjlock);
			nextoff = curoff;
			continue;
		}

		/*
		 * free the swap slot, then the page.
		 */

		if (pg == NULL &&
		    uao_find_swslot(uobj, curoff >> PAGE_SHIFT) > 0) {
			swpgonlydelta++;
		}
		uao_dropswap(uobj, curoff >> PAGE_SHIFT);
		if (pg != NULL) {
			uvm_lock_pageq();
			uvm_pagefree(pg);
			uvm_unlock_pageq();
		}
	}
	simple_unlock(&uobj->vmobjlock);

	if (swpgonlydelta > 0) {
		simple_lock(&uvm.swap_data_lock);
		KASSERT(uvmexp.swpgonly >= swpgonlydelta);
		uvmexp.swpgonly -= swpgonlydelta;
		simple_unlock(&uvm.swap_data_lock);
	}
}


/*
 * uvm_km_pgremove_intrsafe: like uvm_km_pgremove(), but for
 * non-object-backed regions.
 *
 * => when you unmap a part of anonymous kernel memory you want to toss
 *    the pages right away.    (this is called from uvm_unmap_...).
 * => none of the pages will ever be busy, and none of them will ever
 *    be on the active or inactive queues (because they have no object).
 */

void
uvm_km_pgremove_intrsafe(vaddr_t start, vaddr_t end)
{
	struct vm_page *pg;
	paddr_t pa;
	UVMHIST_FUNC("uvm_km_pgremove_intrsafe"); UVMHIST_CALLED(maphist);

	KASSERT(VM_MIN_KERNEL_ADDRESS <= start);
	KASSERT(start < end);
	KASSERT(end < VM_MAX_KERNEL_ADDRESS);

	for (; start < end; start += PAGE_SIZE) {
		if (!pmap_extract(pmap_kernel(), start, &pa)) {
			continue;
		}
		pg = PHYS_TO_VM_PAGE(pa);
		KASSERT(pg);
		KASSERT(pg->uobject == NULL && pg->uanon == NULL);
		uvm_pagefree(pg);
	}
}

#if defined(DEBUG)
void
uvm_km_check_empty(vaddr_t start, vaddr_t end, boolean_t intrsafe)
{
	vaddr_t va;
	paddr_t pa;

	KDASSERT(VM_MIN_KERNEL_ADDRESS <= start);
	KDASSERT(start < end);
	KDASSERT(end < VM_MAX_KERNEL_ADDRESS);

	for (va = start; va < end; va += PAGE_SIZE) {
		if (pmap_extract(pmap_kernel(), va, &pa)) {
			panic("uvm_km_check_empty: va %p has pa 0x%llx",
			    (void *)va, (long long)pa);
		}
		if (!intrsafe) {
			const struct vm_page *pg;

			simple_lock(&uvm.kernel_object->vmobjlock);
			pg = uvm_pagelookup(uvm.kernel_object,
			    va - vm_map_min(kernel_map));
			simple_unlock(&uvm.kernel_object->vmobjlock);
			if (pg) {
				panic("uvm_km_check_empty: "
				    "has page hashed at %p", (const void *)va);
			}
		}
	}
}
#endif /* defined(DEBUG) */

/*
 * uvm_km_alloc: allocate an area of kernel memory.
 *
 * => NOTE: we can return 0 even if we can wait if there is not enough
 *	free VM space in the map... caller should be prepared to handle
 *	this case.
 * => we return KVA of memory allocated
 */

vaddr_t
uvm_km_alloc(struct vm_map *map, vsize_t size, vsize_t align, uvm_flag_t flags)
{
	vaddr_t kva, loopva;
	vaddr_t offset;
	vsize_t loopsize;
	struct vm_page *pg;
	struct uvm_object *obj;
	int pgaflags;
	UVMHIST_FUNC(__func__); UVMHIST_CALLED(maphist);

	KASSERT(vm_map_pmap(map) == pmap_kernel());
	KASSERT((flags & UVM_KMF_TYPEMASK) == UVM_KMF_WIRED ||
	    (flags & UVM_KMF_TYPEMASK) == UVM_KMF_PAGEABLE ||
	    (flags & UVM_KMF_TYPEMASK) == UVM_KMF_VAONLY);

	/*
	 * setup for call
	 */

	kva = vm_map_min(map);	/* hint */
	size = round_page(size);
	obj = (flags & UVM_KMF_PAGEABLE) ? uvm.kernel_object : NULL;
	UVMHIST_LOG(maphist,"  (map=0x%x, obj=0x%x, size=0x%x, flags=%d)",
	    map, obj, size, flags);

	/*
	 * allocate some virtual space
	 */

	if (__predict_false(uvm_map(map, &kva, size, obj, UVM_UNKNOWN_OFFSET,
	    align, UVM_MAPFLAG(UVM_PROT_ALL, UVM_PROT_ALL, UVM_INH_NONE,
	    UVM_ADV_RANDOM,
	    (flags & (UVM_KMF_TRYLOCK | UVM_KMF_NOWAIT | UVM_KMF_WAITVA))
	    | UVM_FLAG_QUANTUM)) != 0)) {
		UVMHIST_LOG(maphist, "<- done (no VM)",0,0,0,0);
		return(0);
	}

	/*
	 * if all we wanted was VA, return now
	 */

	if (flags & (UVM_KMF_VAONLY | UVM_KMF_PAGEABLE)) {
		UVMHIST_LOG(maphist,"<- done valloc (kva=0x%x)", kva,0,0,0);
		return(kva);
	}

	/*
	 * recover object offset from virtual address
	 */

	offset = kva - vm_map_min(kernel_map);
	UVMHIST_LOG(maphist, "  kva=0x%x, offset=0x%x", kva, offset,0,0);

	/*
	 * now allocate and map in the memory... note that we are the only ones
	 * who should ever get a handle on this area of VM.
	 */

	loopva = kva;
	loopsize = size;

	pgaflags = UVM_PGA_USERESERVE;
	if (flags & UVM_KMF_ZERO)
		pgaflags |= UVM_PGA_ZERO;
	while (loopsize) {
		KASSERT(!pmap_extract(pmap_kernel(), loopva, NULL));

		pg = uvm_pagealloc(NULL, offset, NULL, pgaflags);

		/*
		 * out of memory?
		 */

		if (__predict_false(pg == NULL)) {
			if ((flags & UVM_KMF_NOWAIT) ||
			    ((flags & UVM_KMF_CANFAIL) &&
			    !uvm_reclaimable())) {
				/* free everything! */
				uvm_km_free(map, kva, size,
				    flags & UVM_KMF_TYPEMASK);
				return (0);
			} else {
				uvm_wait("km_getwait2");	/* sleep here */
				continue;
			}
		}

		pg->flags &= ~PG_BUSY;	/* new page */
		UVM_PAGE_OWN(pg, NULL);

		/*
		 * map it in
		 */

		pmap_kenter_pa(loopva, VM_PAGE_TO_PHYS(pg),
		    VM_PROT_READ | VM_PROT_WRITE);
		loopva += PAGE_SIZE;
		offset += PAGE_SIZE;
		loopsize -= PAGE_SIZE;
	}

	pmap_update(pmap_kernel());

	UVMHIST_LOG(maphist,"<- done (kva=0x%x)", kva,0,0,0);
	return(kva);
}
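
/*
 * typical caller pattern, sketched (illustrative only; the size and flag
 * choices here are assumptions, not taken from this file):
 *
 *	vaddr_t va;
 *
 *	va = uvm_km_alloc(kernel_map, round_page(len), 0,
 *	    UVM_KMF_WIRED | UVM_KMF_ZERO | UVM_KMF_CANFAIL);
 *	if (va == 0)
 *		return ENOMEM;	(allocation can fail even when waiting)
 *	...
 *	uvm_km_free(kernel_map, va, round_page(len), UVM_KMF_WIRED);
 *
 * note the free must pass the same type flag (UVM_KMF_WIRED here) so the
 * matching teardown path is taken in uvm_km_free() below.
 */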

/*
 * uvm_km_free: free an area of kernel memory
 */

void
uvm_km_free(struct vm_map *map, vaddr_t addr, vsize_t size, uvm_flag_t flags)
{

	KASSERT((flags & UVM_KMF_TYPEMASK) == UVM_KMF_WIRED ||
	    (flags & UVM_KMF_TYPEMASK) == UVM_KMF_PAGEABLE ||
	    (flags & UVM_KMF_TYPEMASK) == UVM_KMF_VAONLY);
	KASSERT((addr & PAGE_MASK) == 0);
	KASSERT(vm_map_pmap(map) == pmap_kernel());

	size = round_page(size);

	if (flags & UVM_KMF_PAGEABLE) {
		uvm_km_pgremove(addr, addr + size);
		pmap_remove(pmap_kernel(), addr, addr + size);
	} else if (flags & UVM_KMF_WIRED) {
		uvm_km_pgremove_intrsafe(addr, addr + size);
		pmap_kremove(addr, size);
	}

	uvm_unmap1(map, addr, addr + size, UVM_FLAG_QUANTUM|UVM_FLAG_VAONLY);
}

/* Sanity; must specify both or none. */
#if (defined(PMAP_MAP_POOLPAGE) || defined(PMAP_UNMAP_POOLPAGE)) && \
    (!defined(PMAP_MAP_POOLPAGE) || !defined(PMAP_UNMAP_POOLPAGE))
#error Must specify MAP and UNMAP together.
#endif

/*
 * uvm_km_alloc_poolpage: allocate a page for the pool allocator
 *
 * => if the pmap specifies an alternate mapping method, we use it.
 */

/* ARGSUSED */
vaddr_t
uvm_km_alloc_poolpage_cache(struct vm_map *map, boolean_t waitok)
{
#if defined(PMAP_MAP_POOLPAGE)
	return uvm_km_alloc_poolpage(map, waitok);
#else
	struct vm_page *pg;
	struct pool *pp = &vm_map_to_kernel(map)->vmk_vacache;
	vaddr_t va;
	int s = 0xdeadbeaf; /* XXX: gcc */
	const boolean_t intrsafe = (map->flags & VM_MAP_INTRSAFE) != 0;

	if ((map->flags & VM_MAP_VACACHE) == 0)
		return uvm_km_alloc_poolpage(map, waitok);

	if (intrsafe)
		s = splvm();
	va = (vaddr_t)pool_get(pp, waitok ? PR_WAITOK : PR_NOWAIT);
	if (intrsafe)
		splx(s);
	if (va == 0)
		return 0;
	KASSERT(!pmap_extract(pmap_kernel(), va, NULL));
again:
	pg = uvm_pagealloc(NULL, 0, NULL, UVM_PGA_USERESERVE);
	if (__predict_false(pg == NULL)) {
		if (waitok) {
			uvm_wait("plpg");
			goto again;
		} else {
			if (intrsafe)
				s = splvm();
			pool_put(pp, (void *)va);
			if (intrsafe)
				splx(s);
			return 0;
		}
	}
	pmap_kenter_pa(va, VM_PAGE_TO_PHYS(pg), VM_PROT_READ|VM_PROT_WRITE);
	pmap_update(pmap_kernel());

	return va;
#endif /* PMAP_MAP_POOLPAGE */
}
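
/*
 * a pool backend would typically wrap the pair of functions above in a
 * struct pool_allocator, roughly like this sketch (illustrative only;
 * "example_page_alloc", "example_page_free" and "pa_example" are
 * hypothetical names, not part of this file):
 *
 *	static void *
 *	example_page_alloc(struct pool *pp, int flags)
 *	{
 *
 *		return (void *)uvm_km_alloc_poolpage_cache(kmem_map,
 *		    (flags & PR_WAITOK) != 0);
 *	}
 *
 *	static void
 *	example_page_free(struct pool *pp, void *v)
 *	{
 *
 *		uvm_km_free_poolpage_cache(kmem_map, (vaddr_t)v);
 *	}
 *
 *	struct pool_allocator pa_example = {
 *		example_page_alloc, example_page_free, 0,
 *	};
 */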

vaddr_t
uvm_km_alloc_poolpage(struct vm_map *map, boolean_t waitok)
{
#if defined(PMAP_MAP_POOLPAGE)
	struct vm_page *pg;
	vaddr_t va;

 again:
	pg = uvm_pagealloc(NULL, 0, NULL, UVM_PGA_USERESERVE);
	if (__predict_false(pg == NULL)) {
		if (waitok) {
			uvm_wait("plpg");
			goto again;
		} else
			return (0);
	}
	va = PMAP_MAP_POOLPAGE(VM_PAGE_TO_PHYS(pg));
	if (__predict_false(va == 0))
		uvm_pagefree(pg);
	return (va);
#else
	vaddr_t va;
	int s = 0xdeadbeaf; /* XXX: gcc */
	const boolean_t intrsafe = (map->flags & VM_MAP_INTRSAFE) != 0;

	if (intrsafe)
		s = splvm();
	va = uvm_km_alloc(map, PAGE_SIZE, 0,
	    (waitok ? 0 : UVM_KMF_NOWAIT | UVM_KMF_TRYLOCK) | UVM_KMF_WIRED);
	if (intrsafe)
		splx(s);
	return (va);
#endif /* PMAP_MAP_POOLPAGE */
}

/*
 * uvm_km_free_poolpage: free a previously allocated pool page
 *
 * => if the pmap specifies an alternate unmapping method, we use it.
 */

/* ARGSUSED */
void
uvm_km_free_poolpage_cache(struct vm_map *map, vaddr_t addr)
{
#if defined(PMAP_UNMAP_POOLPAGE)
	uvm_km_free_poolpage(map, addr);
#else
	struct pool *pp;
	int s = 0xdeadbeaf; /* XXX: gcc */
	const boolean_t intrsafe = (map->flags & VM_MAP_INTRSAFE) != 0;

	if ((map->flags & VM_MAP_VACACHE) == 0) {
		uvm_km_free_poolpage(map, addr);
		return;
	}

	KASSERT(pmap_extract(pmap_kernel(), addr, NULL));
	uvm_km_pgremove_intrsafe(addr, addr + PAGE_SIZE);
	pmap_kremove(addr, PAGE_SIZE);
#if defined(DEBUG)
	pmap_update(pmap_kernel());
#endif
	KASSERT(!pmap_extract(pmap_kernel(), addr, NULL));
	pp = &vm_map_to_kernel(map)->vmk_vacache;
	if (intrsafe)
		s = splvm();
	pool_put(pp, (void *)addr);
	if (intrsafe)
		splx(s);
#endif
}

/* ARGSUSED */
void
uvm_km_free_poolpage(struct vm_map *map, vaddr_t addr)
{
#if defined(PMAP_UNMAP_POOLPAGE)
	paddr_t pa;

	pa = PMAP_UNMAP_POOLPAGE(addr);
	uvm_pagefree(PHYS_TO_VM_PAGE(pa));
#else
	int s = 0xdeadbeaf; /* XXX: gcc */
	const boolean_t intrsafe = (map->flags & VM_MAP_INTRSAFE) != 0;

	if (intrsafe)
		s = splvm();
	uvm_km_free(map, addr, PAGE_SIZE, UVM_KMF_WIRED);
	if (intrsafe)
		splx(s);
#endif /* PMAP_UNMAP_POOLPAGE */
}