uvm_km.c revision 1.96
/*	$NetBSD: uvm_km.c,v 1.96 2007/07/21 20:52:59 ad Exp $	*/

/*
 * Copyright (c) 1997 Charles D. Cranor and Washington University.
 * Copyright (c) 1991, 1993, The Regents of the University of California.
 *
 * All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * The Mach Operating System project at Carnegie-Mellon University.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by Charles D. Cranor,
 *	Washington University, the University of California, Berkeley and
 *	its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)vm_kern.c   8.3 (Berkeley) 1/12/94
 * from: Id: uvm_km.c,v 1.1.2.14 1998/02/06 05:19:27 chs Exp
 *
 *
 * Copyright (c) 1987, 1990 Carnegie-Mellon University.
 * All rights reserved.
 *
 * Permission to use, copy, modify and distribute this software and
 * its documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND
 * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie the
 * rights to redistribute these changes.
 */

/*
 * uvm_km.c: handle kernel memory allocation and management
 */

/*
 * overview of kernel memory management:
 *
 * the kernel virtual address space is mapped by "kernel_map."  kernel_map
 * starts at VM_MIN_KERNEL_ADDRESS and goes to VM_MAX_KERNEL_ADDRESS.
 * note that VM_MIN_KERNEL_ADDRESS is equal to vm_map_min(kernel_map).
 *
 * the kernel_map has several "submaps."  submaps can only appear in
 * the kernel_map (user processes can't use them).  submaps "take over"
 * the management of a sub-range of the kernel's address space.  submaps
 * are typically allocated at boot time and are never released.  kernel
 * virtual address space that is mapped by a submap is locked by the
 * submap's lock -- not the kernel_map's lock.
 *
 * thus, the useful feature of submaps is that they allow us to break
 * up the locking and protection of the kernel address space into smaller
 * chunks.
 *
 * the vm system has several standard kernel submaps, including:
 *   kmem_map => contains only wired kernel memory for the kernel
 *		malloc.  *** access to kmem_map must be protected
 *		by splvm() because we are allowed to call malloc()
 *		at interrupt time ***
 *   mb_map => memory for large mbufs,  *** protected by splvm ***
 *   pager_map => used to map "buf" structures into kernel space
 *   exec_map => used during exec to handle exec args
 *   etc...
 *
 * the kernel allocates its private memory out of special uvm_objects whose
 * reference count is set to UVM_OBJ_KERN (thus indicating that the objects
 * are "special" and never die).  all kernel objects should be thought of
 * as large, fixed-sized, sparsely populated uvm_objects.  each kernel
 * object is equal to the size of kernel virtual address space (i.e. the
 * value "VM_MAX_KERNEL_ADDRESS - VM_MIN_KERNEL_ADDRESS").
 *
 * note that just because a kernel object spans the entire kernel virtual
 * address space doesn't mean that it has to be mapped into the entire space.
 * large chunks of a kernel object's space go unused either because
 * that area of kernel VM is unmapped, or there is some other type of
 * object mapped into that range (e.g. a vnode).  for submaps' kernel
 * objects, the only part of the object that can ever be populated is the
 * offsets that are managed by the submap.
 *
 * note that the "offset" in a kernel object is always the kernel virtual
 * address minus the VM_MIN_KERNEL_ADDRESS (aka vm_map_min(kernel_map)).
 * example:
 *   suppose VM_MIN_KERNEL_ADDRESS is 0xf8000000 and the kernel does a
 *   uvm_km_alloc(kernel_map, PAGE_SIZE) [allocate 1 wired down page in the
 *   kernel map].  if uvm_km_alloc returns virtual address 0xf8235000,
 *   then that means that the page at offset 0x235000 in kernel_object is
 *   mapped at 0xf8235000.
 *
 * kernel objects have one other special property: when the kernel virtual
 * memory mapping them is unmapped, the backing memory in the object is
 * freed right away.  this is done with the uvm_km_pgremove() function.
 * this has to be done because there is no backing store for kernel pages
 * and no need to save them after they are no longer referenced.
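 *
 * illustrative sketch (editor's addition, not part of the original
 * comment): a typical wired allocation and release through the
 * interfaces defined below might look roughly like
 *
 *	vaddr_t va;
 *
 *	va = uvm_km_alloc(kernel_map, PAGE_SIZE, 0,
 *	    UVM_KMF_WIRED | UVM_KMF_ZERO | UVM_KMF_WAITVA);
 *	if (va != 0) {
 *		... use the page mapped at va ...
 *		uvm_km_free(kernel_map, va, PAGE_SIZE, UVM_KMF_WIRED);
 *	}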
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: uvm_km.c,v 1.96 2007/07/21 20:52:59 ad Exp $");

#include "opt_uvmhist.h"

#include <sys/param.h>
#include <sys/malloc.h>
#include <sys/systm.h>
#include <sys/proc.h>
#include <sys/pool.h>

#include <uvm/uvm.h>

/*
 * global data structures
 */

struct vm_map *kernel_map = NULL;

/*
 * local data structures
 */

static struct vm_map_kernel	kernel_map_store;
static struct vm_map_entry	kernel_first_mapent_store;

#if !defined(PMAP_MAP_POOLPAGE)

/*
 * kva cache
 *
 * XXX maybe it's better to do this at the uvm_map layer.
 */

#define	KM_VACACHE_SIZE	(32 * PAGE_SIZE) /* XXX tune */

static void *km_vacache_alloc(struct pool *, int);
static void km_vacache_free(struct pool *, void *);
static void km_vacache_init(struct vm_map *, const char *, size_t);

/* XXX */
#define	KM_VACACHE_POOL_TO_MAP(pp) \
	((struct vm_map *)((char *)(pp) - \
	offsetof(struct vm_map_kernel, vmk_vacache)))

static void *
km_vacache_alloc(struct pool *pp, int flags)
{
	vaddr_t va;
	size_t size;
	struct vm_map *map;
	size = pp->pr_alloc->pa_pagesz;

	map = KM_VACACHE_POOL_TO_MAP(pp);

	va = vm_map_min(map); /* hint */
	if (uvm_map(map, &va, size, NULL, UVM_UNKNOWN_OFFSET, size,
	    UVM_MAPFLAG(UVM_PROT_ALL, UVM_PROT_ALL, UVM_INH_NONE,
	    UVM_ADV_RANDOM, UVM_FLAG_QUANTUM |
	    ((flags & PR_WAITOK) ? UVM_FLAG_WAITVA :
	    UVM_FLAG_TRYLOCK | UVM_FLAG_NOWAIT))))
		return NULL;

	return (void *)va;
}

static void
km_vacache_free(struct pool *pp, void *v)
{
	vaddr_t va = (vaddr_t)v;
	size_t size = pp->pr_alloc->pa_pagesz;
	struct vm_map *map;

	map = KM_VACACHE_POOL_TO_MAP(pp);
	uvm_unmap1(map, va, va + size, UVM_FLAG_QUANTUM|UVM_FLAG_VAONLY);
}

/*
 * km_vacache_init: initialize kva cache.
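 *
 * note (editor's addition, not part of the original comment): the cache is
 * a pool whose "pages" are pa_pagesz-sized chunks of kernel VA (by default
 * KM_VACACHE_SIZE) obtained from the map itself via km_vacache_alloc() and
 * km_vacache_free() above.  KM_VACACHE_POOL_TO_MAP() recovers the owning
 * map from the embedded pool with offsetof(), i.e. a container-of style
 * back pointer.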
 */

static void
km_vacache_init(struct vm_map *map, const char *name, size_t size)
{
	struct vm_map_kernel *vmk;
	struct pool *pp;
	struct pool_allocator *pa;
	int ipl;

	KASSERT(VM_MAP_IS_KERNEL(map));
	KASSERT(size < (vm_map_max(map) - vm_map_min(map)) / 2); /* sanity */

	vmk = vm_map_to_kernel(map);
	pp = &vmk->vmk_vacache;
	pa = &vmk->vmk_vacache_allocator;
	memset(pa, 0, sizeof(*pa));
	pa->pa_alloc = km_vacache_alloc;
	pa->pa_free = km_vacache_free;
	pa->pa_pagesz = (unsigned int)size;
	pa->pa_backingmap = map;
	pa->pa_backingmapptr = NULL;

	if ((map->flags & VM_MAP_INTRSAFE) != 0)
		ipl = IPL_VM;
	else
		ipl = IPL_NONE;

	pool_init(pp, PAGE_SIZE, 0, 0, PR_NOTOUCH | PR_RECURSIVE, name, pa,
	    ipl);
}

void
uvm_km_vacache_init(struct vm_map *map, const char *name, size_t size)
{

	map->flags |= VM_MAP_VACACHE;
	if (size == 0)
		size = KM_VACACHE_SIZE;
	km_vacache_init(map, name, size);
}

#else /* !defined(PMAP_MAP_POOLPAGE) */

void
uvm_km_vacache_init(struct vm_map *map, const char *name, size_t size)
{

	/* nothing */
}

#endif /* !defined(PMAP_MAP_POOLPAGE) */

void
uvm_km_va_drain(struct vm_map *map, uvm_flag_t flags)
{
	struct vm_map_kernel *vmk = vm_map_to_kernel(map);
	const bool intrsafe = (map->flags & VM_MAP_INTRSAFE) != 0;
	int s = 0xdeadbeaf; /* XXX: gcc */

	if (intrsafe) {
		s = splvm();
	}
	callback_run_roundrobin(&vmk->vmk_reclaim_callback, NULL);
	if (intrsafe) {
		splx(s);
	}
}

/*
 * uvm_km_init: init kernel maps and objects to reflect reality (i.e.
 * KVM already allocated for text, data, bss, and static data structures).
 *
 * => KVM is defined by VM_MIN_KERNEL_ADDRESS/VM_MAX_KERNEL_ADDRESS.
 *    we assume that [vmin -> start] has already been allocated and that
 *    "end" is the end.
 */

void
uvm_km_init(vaddr_t start, vaddr_t end)
{
	vaddr_t base = VM_MIN_KERNEL_ADDRESS;

	/*
	 * next, init kernel memory objects.
	 */

	/* kernel_object: for pageable anonymous kernel memory */
	uao_init();
	uvm_kernel_object = uao_create(VM_MAX_KERNEL_ADDRESS -
	    VM_MIN_KERNEL_ADDRESS, UAO_FLAG_KERNOBJ);

	/*
	 * init the map and reserve any space that might already
	 * have been allocated kernel space before installing.
	 */

	uvm_map_setup_kernel(&kernel_map_store, base, end, VM_MAP_PAGEABLE);
	kernel_map_store.vmk_map.pmap = pmap_kernel();
	if (start != base) {
		int error;
		struct uvm_map_args args;

		error = uvm_map_prepare(&kernel_map_store.vmk_map,
		    base, start - base,
		    NULL, UVM_UNKNOWN_OFFSET, 0,
		    UVM_MAPFLAG(UVM_PROT_ALL, UVM_PROT_ALL, UVM_INH_NONE,
		    UVM_ADV_RANDOM, UVM_FLAG_FIXED), &args);
		if (!error) {
			kernel_first_mapent_store.flags =
			    UVM_MAP_KERNEL | UVM_MAP_FIRST;
			error = uvm_map_enter(&kernel_map_store.vmk_map, &args,
			    &kernel_first_mapent_store);
		}

		if (error)
			panic(
			    "uvm_km_init: could not reserve space for kernel");
	}

	/*
	 * install!
	 */

	kernel_map = &kernel_map_store.vmk_map;
	uvm_km_vacache_init(kernel_map, "kvakernel", 0);
}

/*
 * uvm_km_suballoc: allocate a submap in the kernel map.  once a submap
 * is allocated all references to that area of VM must go through it.  this
 * allows the locking of VAs in kernel_map to be broken up into regions.
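 *
 * illustrative sketch (editor's addition; foo_map and FOO_MAP_SIZE are
 * hypothetical names, not from this file): a boot-time submap could be
 * carved out of kernel_map roughly as
 *
 *	vaddr_t minaddr = 0, maxaddr;
 *	struct vm_map *foo_map;
 *
 *	foo_map = uvm_km_suballoc(kernel_map, &minaddr, &maxaddr,
 *	    FOO_MAP_SIZE, VM_MAP_INTRSAFE, false, NULL);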
 *
 * => if `fixed' is true, *vmin specifies where the region described
 *	by the submap must start
 * => if submap is non NULL we use that as the submap, otherwise we
 *	alloc a new map
 */

struct vm_map *
uvm_km_suballoc(struct vm_map *map, vaddr_t *vmin /* IN/OUT */,
    vaddr_t *vmax /* OUT */, vsize_t size, int flags, bool fixed,
    struct vm_map_kernel *submap)
{
	int mapflags = UVM_FLAG_NOMERGE | (fixed ? UVM_FLAG_FIXED : 0);

	KASSERT(vm_map_pmap(map) == pmap_kernel());

	size = round_page(size);	/* round up to pagesize */
	size += uvm_mapent_overhead(size, flags);

	/*
	 * first allocate a blank spot in the parent map
	 */

	if (uvm_map(map, vmin, size, NULL, UVM_UNKNOWN_OFFSET, 0,
	    UVM_MAPFLAG(UVM_PROT_ALL, UVM_PROT_ALL, UVM_INH_NONE,
	    UVM_ADV_RANDOM, mapflags)) != 0) {
		panic("uvm_km_suballoc: unable to allocate space in parent map");
	}

	/*
	 * set VM bounds (vmin is filled in by uvm_map)
	 */

	*vmax = *vmin + size;

	/*
	 * add references to pmap and create or init the submap
	 */

	pmap_reference(vm_map_pmap(map));
	if (submap == NULL) {
		submap = malloc(sizeof(*submap), M_VMMAP, M_WAITOK);
		if (submap == NULL)
			panic("uvm_km_suballoc: unable to create submap");
	}
	uvm_map_setup_kernel(submap, *vmin, *vmax, flags);
	submap->vmk_map.pmap = vm_map_pmap(map);

	/*
	 * now let uvm_map_submap plug it in...
	 */

	if (uvm_map_submap(map, *vmin, *vmax, &submap->vmk_map) != 0)
		panic("uvm_km_suballoc: submap allocation failed");

	return(&submap->vmk_map);
}

/*
 * uvm_km_pgremove: remove pages from a kernel uvm_object.
 *
 * => when you unmap a part of anonymous kernel memory you want to toss
 *    the pages right away.  (this gets called from uvm_unmap_...).
 */

void
uvm_km_pgremove(vaddr_t startva, vaddr_t endva)
{
	struct uvm_object * const uobj = uvm_kernel_object;
	const voff_t start = startva - vm_map_min(kernel_map);
	const voff_t end = endva - vm_map_min(kernel_map);
	struct vm_page *pg;
	voff_t curoff, nextoff;
	int swpgonlydelta = 0;
	UVMHIST_FUNC("uvm_km_pgremove"); UVMHIST_CALLED(maphist);

	KASSERT(VM_MIN_KERNEL_ADDRESS <= startva);
	KASSERT(startva < endva);
	KASSERT(endva <= VM_MAX_KERNEL_ADDRESS);

	simple_lock(&uobj->vmobjlock);

	for (curoff = start; curoff < end; curoff = nextoff) {
		nextoff = curoff + PAGE_SIZE;
		pg = uvm_pagelookup(uobj, curoff);
		if (pg != NULL && pg->flags & PG_BUSY) {
			pg->flags |= PG_WANTED;
			UVM_UNLOCK_AND_WAIT(pg, &uobj->vmobjlock, 0,
			    "km_pgrm", 0);
			simple_lock(&uobj->vmobjlock);
			nextoff = curoff;
			continue;
		}

		/*
		 * free the swap slot, then the page.
		 */

		if (pg == NULL &&
		    uao_find_swslot(uobj, curoff >> PAGE_SHIFT) > 0) {
			swpgonlydelta++;
		}
		uao_dropswap(uobj, curoff >> PAGE_SHIFT);
		if (pg != NULL) {
			uvm_lock_pageq();
			uvm_pagefree(pg);
			uvm_unlock_pageq();
		}
	}
	simple_unlock(&uobj->vmobjlock);

	if (swpgonlydelta > 0) {
		mutex_enter(&uvm_swap_data_lock);
		KASSERT(uvmexp.swpgonly >= swpgonlydelta);
		uvmexp.swpgonly -= swpgonlydelta;
		mutex_exit(&uvm_swap_data_lock);
	}
}


/*
 * uvm_km_pgremove_intrsafe: like uvm_km_pgremove(), but for non-object-backed
 * regions.
 *
 * => when you unmap a part of anonymous kernel memory you want to toss
 *    the pages right away.  (this is called from uvm_unmap_...).
 * => none of the pages will ever be busy, and none of them will ever
 *    be on the active or inactive queues (because they have no object).
 */

void
uvm_km_pgremove_intrsafe(vaddr_t start, vaddr_t end)
{
	struct vm_page *pg;
	paddr_t pa;
	UVMHIST_FUNC("uvm_km_pgremove_intrsafe"); UVMHIST_CALLED(maphist);

	KASSERT(VM_MIN_KERNEL_ADDRESS <= start);
	KASSERT(start < end);
	KASSERT(end <= VM_MAX_KERNEL_ADDRESS);

	for (; start < end; start += PAGE_SIZE) {
		if (!pmap_extract(pmap_kernel(), start, &pa)) {
			continue;
		}
		pg = PHYS_TO_VM_PAGE(pa);
		KASSERT(pg);
		KASSERT(pg->uobject == NULL && pg->uanon == NULL);
		uvm_pagefree(pg);
	}
}

#if defined(DEBUG)
void
uvm_km_check_empty(vaddr_t start, vaddr_t end, bool intrsafe)
{
	vaddr_t va;
	paddr_t pa;

	KDASSERT(VM_MIN_KERNEL_ADDRESS <= start);
	KDASSERT(start < end);
	KDASSERT(end <= VM_MAX_KERNEL_ADDRESS);

	for (va = start; va < end; va += PAGE_SIZE) {
		if (pmap_extract(pmap_kernel(), va, &pa)) {
			panic("uvm_km_check_empty: va %p has pa 0x%llx",
			    (void *)va, (long long)pa);
		}
		if (!intrsafe) {
			const struct vm_page *pg;

			simple_lock(&uvm_kernel_object->vmobjlock);
			pg = uvm_pagelookup(uvm_kernel_object,
			    va - vm_map_min(kernel_map));
			simple_unlock(&uvm_kernel_object->vmobjlock);
			if (pg) {
				panic("uvm_km_check_empty: "
				    "has page hashed at %p", (const void *)va);
			}
		}
	}
}
#endif /* defined(DEBUG) */

/*
 * uvm_km_alloc: allocate an area of kernel memory.
 *
 * => NOTE: we can return 0 even if we can wait if there is not enough
 *	free VM space in the map... caller should be prepared to handle
 *	this case.
 * => we return KVA of memory allocated
 */

vaddr_t
uvm_km_alloc(struct vm_map *map, vsize_t size, vsize_t align, uvm_flag_t flags)
{
	vaddr_t kva, loopva;
	vaddr_t offset;
	vsize_t loopsize;
	struct vm_page *pg;
	struct uvm_object *obj;
	int pgaflags;
	vm_prot_t prot;
	UVMHIST_FUNC(__func__); UVMHIST_CALLED(maphist);

	KASSERT(vm_map_pmap(map) == pmap_kernel());
	KASSERT((flags & UVM_KMF_TYPEMASK) == UVM_KMF_WIRED ||
	    (flags & UVM_KMF_TYPEMASK) == UVM_KMF_PAGEABLE ||
	    (flags & UVM_KMF_TYPEMASK) == UVM_KMF_VAONLY);

	/*
	 * setup for call
	 */

	kva = vm_map_min(map);	/* hint */
	size = round_page(size);
	obj = (flags & UVM_KMF_PAGEABLE) ? uvm_kernel_object : NULL;
	UVMHIST_LOG(maphist," (map=0x%x, obj=0x%x, size=0x%x, flags=%d)",
	    map, obj, size, flags);

	/*
	 * allocate some virtual space
	 */

	if (__predict_false(uvm_map(map, &kva, size, obj, UVM_UNKNOWN_OFFSET,
	    align, UVM_MAPFLAG(UVM_PROT_ALL, UVM_PROT_ALL, UVM_INH_NONE,
	    UVM_ADV_RANDOM,
	    (flags & (UVM_KMF_TRYLOCK | UVM_KMF_NOWAIT | UVM_KMF_WAITVA))
	    | UVM_FLAG_QUANTUM)) != 0)) {
		UVMHIST_LOG(maphist, "<- done (no VM)",0,0,0,0);
		return(0);
	}

	/*
	 * if all we wanted was VA, return now
	 */

	if (flags & (UVM_KMF_VAONLY | UVM_KMF_PAGEABLE)) {
		UVMHIST_LOG(maphist,"<- done valloc (kva=0x%x)", kva,0,0,0);
		return(kva);
	}

	/*
	 * recover object offset from virtual address
	 */

	offset = kva - vm_map_min(kernel_map);
	UVMHIST_LOG(maphist, " kva=0x%x, offset=0x%x", kva, offset,0,0);

	/*
	 * now allocate and map in the memory... note that we are the only ones
	 * who should ever get a handle on this area of VM.
	 */

	loopva = kva;
	loopsize = size;

	pgaflags = UVM_PGA_USERESERVE;
	if (flags & UVM_KMF_ZERO)
		pgaflags |= UVM_PGA_ZERO;
	prot = VM_PROT_READ | VM_PROT_WRITE;
	if (flags & UVM_KMF_EXEC)
		prot |= VM_PROT_EXECUTE;
	while (loopsize) {
		KASSERT(!pmap_extract(pmap_kernel(), loopva, NULL));

		pg = uvm_pagealloc(NULL, offset, NULL, pgaflags);

		/*
		 * out of memory?
		 */

		if (__predict_false(pg == NULL)) {
			if ((flags & UVM_KMF_NOWAIT) ||
			    ((flags & UVM_KMF_CANFAIL) && !uvm_reclaimable())) {
				/* free everything! */
				uvm_km_free(map, kva, size,
				    flags & UVM_KMF_TYPEMASK);
				return (0);
			} else {
				uvm_wait("km_getwait2");	/* sleep here */
				continue;
			}
		}

		pg->flags &= ~PG_BUSY;	/* new page */
		UVM_PAGE_OWN(pg, NULL);

		/*
		 * map it in
		 */

		pmap_kenter_pa(loopva, VM_PAGE_TO_PHYS(pg), prot);
		loopva += PAGE_SIZE;
		offset += PAGE_SIZE;
		loopsize -= PAGE_SIZE;
	}

	pmap_update(pmap_kernel());

	UVMHIST_LOG(maphist,"<- done (kva=0x%x)", kva,0,0,0);
	return(kva);
}

/*
 * uvm_km_free: free an area of kernel memory
 */

void
uvm_km_free(struct vm_map *map, vaddr_t addr, vsize_t size, uvm_flag_t flags)
{

	KASSERT((flags & UVM_KMF_TYPEMASK) == UVM_KMF_WIRED ||
	    (flags & UVM_KMF_TYPEMASK) == UVM_KMF_PAGEABLE ||
	    (flags & UVM_KMF_TYPEMASK) == UVM_KMF_VAONLY);
	KASSERT((addr & PAGE_MASK) == 0);
	KASSERT(vm_map_pmap(map) == pmap_kernel());

	size = round_page(size);

	if (flags & UVM_KMF_PAGEABLE) {
		uvm_km_pgremove(addr, addr + size);
		pmap_remove(pmap_kernel(), addr, addr + size);
	} else if (flags & UVM_KMF_WIRED) {
		uvm_km_pgremove_intrsafe(addr, addr + size);
		pmap_kremove(addr, size);
	}

	uvm_unmap1(map, addr, addr + size, UVM_FLAG_QUANTUM|UVM_FLAG_VAONLY);
}

/* Sanity; must specify both or none. */
#if (defined(PMAP_MAP_POOLPAGE) || defined(PMAP_UNMAP_POOLPAGE)) && \
    (!defined(PMAP_MAP_POOLPAGE) || !defined(PMAP_UNMAP_POOLPAGE))
#error Must specify MAP and UNMAP together.
#endif

/*
 * uvm_km_alloc_poolpage: allocate a page for the pool allocator
 *
 * => if the pmap specifies an alternate mapping method, we use it.
 */

/* ARGSUSED */
vaddr_t
uvm_km_alloc_poolpage_cache(struct vm_map *map, bool waitok)
{
#if defined(PMAP_MAP_POOLPAGE)
	return uvm_km_alloc_poolpage(map, waitok);
#else
	struct vm_page *pg;
	struct pool *pp = &vm_map_to_kernel(map)->vmk_vacache;
	vaddr_t va;
	int s = 0xdeadbeaf; /* XXX: gcc */
	const bool intrsafe = (map->flags & VM_MAP_INTRSAFE) != 0;

	if ((map->flags & VM_MAP_VACACHE) == 0)
		return uvm_km_alloc_poolpage(map, waitok);

	if (intrsafe)
		s = splvm();
	va = (vaddr_t)pool_get(pp, waitok ? PR_WAITOK : PR_NOWAIT);
	if (intrsafe)
		splx(s);
	if (va == 0)
		return 0;
	KASSERT(!pmap_extract(pmap_kernel(), va, NULL));
again:
	pg = uvm_pagealloc(NULL, 0, NULL, UVM_PGA_USERESERVE);
	if (__predict_false(pg == NULL)) {
		if (waitok) {
			uvm_wait("plpg");
			goto again;
		} else {
			if (intrsafe)
				s = splvm();
			pool_put(pp, (void *)va);
			if (intrsafe)
				splx(s);
			return 0;
		}
	}
	pmap_kenter_pa(va, VM_PAGE_TO_PHYS(pg), VM_PROT_READ|VM_PROT_WRITE);
	pmap_update(pmap_kernel());

	return va;
#endif /* PMAP_MAP_POOLPAGE */
}

vaddr_t
uvm_km_alloc_poolpage(struct vm_map *map, bool waitok)
{
#if defined(PMAP_MAP_POOLPAGE)
	struct vm_page *pg;
	vaddr_t va;

 again:
	pg = uvm_pagealloc(NULL, 0, NULL, UVM_PGA_USERESERVE);
	if (__predict_false(pg == NULL)) {
		if (waitok) {
			uvm_wait("plpg");
			goto again;
		} else
			return (0);
	}
	va = PMAP_MAP_POOLPAGE(VM_PAGE_TO_PHYS(pg));
	if (__predict_false(va == 0))
		uvm_pagefree(pg);
	return (va);
#else
	vaddr_t va;
	int s = 0xdeadbeaf; /* XXX: gcc */
	const bool intrsafe = (map->flags & VM_MAP_INTRSAFE) != 0;

	if (intrsafe)
		s = splvm();
	va = uvm_km_alloc(map, PAGE_SIZE, 0,
	    (waitok ? 0 : UVM_KMF_NOWAIT | UVM_KMF_TRYLOCK) | UVM_KMF_WIRED);
	if (intrsafe)
		splx(s);
	return (va);
#endif /* PMAP_MAP_POOLPAGE */
}

/*
 * uvm_km_free_poolpage: free a previously allocated pool page
 *
 * => if the pmap specifies an alternate unmapping method, we use it.
 */

/* ARGSUSED */
void
uvm_km_free_poolpage_cache(struct vm_map *map, vaddr_t addr)
{
#if defined(PMAP_UNMAP_POOLPAGE)
	uvm_km_free_poolpage(map, addr);
#else
	struct pool *pp;
	int s = 0xdeadbeaf; /* XXX: gcc */
	const bool intrsafe = (map->flags & VM_MAP_INTRSAFE) != 0;

	if ((map->flags & VM_MAP_VACACHE) == 0) {
		uvm_km_free_poolpage(map, addr);
		return;
	}

	KASSERT(pmap_extract(pmap_kernel(), addr, NULL));
	uvm_km_pgremove_intrsafe(addr, addr + PAGE_SIZE);
	pmap_kremove(addr, PAGE_SIZE);
#if defined(DEBUG)
	pmap_update(pmap_kernel());
#endif
	KASSERT(!pmap_extract(pmap_kernel(), addr, NULL));
	pp = &vm_map_to_kernel(map)->vmk_vacache;
	if (intrsafe)
		s = splvm();
	pool_put(pp, (void *)addr);
	if (intrsafe)
		splx(s);
#endif
}

/* ARGSUSED */
void
uvm_km_free_poolpage(struct vm_map *map, vaddr_t addr)
{
#if defined(PMAP_UNMAP_POOLPAGE)
	paddr_t pa;

	pa = PMAP_UNMAP_POOLPAGE(addr);
	uvm_pagefree(PHYS_TO_VM_PAGE(pa));
#else
	int s = 0xdeadbeaf; /* XXX: gcc */
	const bool intrsafe = (map->flags & VM_MAP_INTRSAFE) != 0;

	if (intrsafe)
		s = splvm();
	uvm_km_free(map, addr, PAGE_SIZE, UVM_KMF_WIRED);
	if (intrsafe)
		splx(s);
#endif /* PMAP_UNMAP_POOLPAGE */
}
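
/*
 * illustrative sketch (editor's addition, not part of the original file):
 * a pool backend built on the poolpage interfaces above would typically
 * supply pa_alloc/pa_free hooks along these lines; the function names
 * here are hypothetical.
 *
 *	static void *
 *	example_page_alloc(struct pool *pp, int flags)
 *	{
 *		return (void *)uvm_km_alloc_poolpage_cache(kmem_map,
 *		    (flags & PR_WAITOK) != 0);
 *	}
 *
 *	static void
 *	example_page_free(struct pool *pp, void *v)
 *	{
 *		uvm_km_free_poolpage_cache(kmem_map, (vaddr_t)v);
 *	}
 */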