uvm_km.c revision 1.88
/*	$NetBSD: uvm_km.c,v 1.88 2006/05/25 14:27:28 yamt Exp $	*/

/*
 * Copyright (c) 1997 Charles D. Cranor and Washington University.
 * Copyright (c) 1991, 1993, The Regents of the University of California.
 *
 * All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * The Mach Operating System project at Carnegie-Mellon University.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *      This product includes software developed by Charles D. Cranor,
 *      Washington University, the University of California, Berkeley and
 *      its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * @(#)vm_kern.c       8.3 (Berkeley) 1/12/94
 * from: Id: uvm_km.c,v 1.1.2.14 1998/02/06 05:19:27 chs Exp
 *
 *
 * Copyright (c) 1987, 1990 Carnegie-Mellon University.
 * All rights reserved.
 *
 * Permission to use, copy, modify and distribute this software and
 * its documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND
 * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie the
 * rights to redistribute these changes.
 */

/*
 * uvm_km.c: handle kernel memory allocation and management
 */

/*
 * overview of kernel memory management:
 *
 * the kernel virtual address space is mapped by "kernel_map."   kernel_map
 * starts at VM_MIN_KERNEL_ADDRESS and goes to VM_MAX_KERNEL_ADDRESS.
 * note that VM_MIN_KERNEL_ADDRESS is equal to vm_map_min(kernel_map).
 *
 * the kernel_map has several "submaps."   submaps can only appear in
 * the kernel_map (user processes can't use them).   submaps "take over"
 * the management of a sub-range of the kernel's address space.  submaps
 * are typically allocated at boot time and are never released.   kernel
 * virtual address space that is mapped by a submap is locked by the
 * submap's lock -- not the kernel_map's lock.
 *
 * thus, the useful feature of submaps is that they allow us to break
 * up the locking and protection of the kernel address space into smaller
 * chunks.
 *
 * the vm system has several standard kernel submaps, including:
 *   kmem_map => contains only wired kernel memory for the kernel
 *		malloc.   *** access to kmem_map must be protected
 *		by splvm() because we are allowed to call malloc()
 *		at interrupt time ***
 *   mb_map => memory for large mbufs,  *** protected by splvm ***
 *   pager_map => used to map "buf" structures into kernel space
 *   exec_map => used during exec to handle exec args
 *   etc...
 *
 * the kernel allocates its private memory out of special uvm_objects whose
 * reference count is set to UVM_OBJ_KERN (thus indicating that the objects
 * are "special" and never die).   all kernel objects should be thought of
 * as large, fixed-sized, sparsely populated uvm_objects.   each kernel
 * object is equal to the size of kernel virtual address space (i.e. the
 * value "VM_MAX_KERNEL_ADDRESS - VM_MIN_KERNEL_ADDRESS").
 *
 * note that just because a kernel object spans the entire kernel virtual
 * address space doesn't mean that it has to be mapped into the entire space.
 * large chunks of a kernel object's space go unused either because
 * that area of kernel VM is unmapped, or there is some other type of
 * object mapped into that range (e.g. a vnode).   for submaps' kernel
 * objects, the only parts of the object that can ever be populated are the
 * offsets that are managed by the submap.
 *
 * note that the "offset" in a kernel object is always the kernel virtual
 * address minus the VM_MIN_KERNEL_ADDRESS (aka vm_map_min(kernel_map)).
 * example:
 *   suppose VM_MIN_KERNEL_ADDRESS is 0xf8000000 and the kernel does a
 *   uvm_km_alloc(kernel_map, PAGE_SIZE) [allocate 1 wired down page in the
 *   kernel map].   if uvm_km_alloc returns virtual address 0xf8235000,
 *   then that means that the page at offset 0x235000 in kernel_object is
 *   mapped at 0xf8235000.
 *
 * kernel objects have one other special property: when the kernel virtual
 * memory mapping them is unmapped, the backing memory in the object is
 * freed right away.   this is done with the uvm_km_pgremove() function.
 * this has to be done because there is no backing store for kernel pages
 * and no need to save them after they are no longer referenced.
 */
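
/*
 * Example (illustrative only; the caller and its error handling are
 * assumptions, not part of this file): a typical wired allocation and
 * release using the interfaces defined below.
 *
 *	vaddr_t va;
 *
 *	va = uvm_km_alloc(kernel_map, PAGE_SIZE, 0,
 *	    UVM_KMF_WIRED | UVM_KMF_ZERO | UVM_KMF_CANFAIL);
 *	if (va == 0)
 *		return ENOMEM;	(uvm_km_alloc can fail even when waiting)
 *	...
 *	uvm_km_free(kernel_map, va, PAGE_SIZE, UVM_KMF_WIRED);
 *
 * the type flag passed to uvm_km_free() must match the one used for the
 * allocation (UVM_KMF_WIRED here).
 */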

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: uvm_km.c,v 1.88 2006/05/25 14:27:28 yamt Exp $");

#include "opt_uvmhist.h"

#include <sys/param.h>
#include <sys/malloc.h>
#include <sys/systm.h>
#include <sys/proc.h>
#include <sys/pool.h>

#include <uvm/uvm.h>

/*
 * global data structures
 */

struct vm_map *kernel_map = NULL;

/*
 * local data structures
 */

static struct vm_map_kernel kernel_map_store;
static struct vm_map_entry kernel_first_mapent_store;

#if !defined(PMAP_MAP_POOLPAGE)

/*
 * kva cache
 *
 * XXX maybe it's better to do this at the uvm_map layer.
 */

#define	KM_VACACHE_SIZE	(32 * PAGE_SIZE) /* XXX tune */

static void *km_vacache_alloc(struct pool *, int);
static void km_vacache_free(struct pool *, void *);
static void km_vacache_init(struct vm_map *, const char *, size_t);

/* XXX */
#define	KM_VACACHE_POOL_TO_MAP(pp) \
	((struct vm_map *)((char *)(pp) - \
	    offsetof(struct vm_map_kernel, vmk_vacache)))
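
/*
 * Illustrative note: KM_VACACHE_POOL_TO_MAP() recovers the kernel map that
 * owns a kva cache pool by undoing the offset of the pool embedded in
 * struct vm_map_kernel (see km_vacache_init() below, which hands
 * &vmk->vmk_vacache to pool_init()), and it relies on the struct vm_map
 * sitting at the start of struct vm_map_kernel so the final cast is valid.
 * A minimal sketch of the same "container from member" idea, using
 * hypothetical names:
 *
 *	struct container {
 *		struct vm_map c_map;
 *		struct pool c_pool;
 *	};
 *
 *	struct pool *pp = &some_container->c_pool;
 *	struct container *c = (struct container *)((char *)pp -
 *	    offsetof(struct container, c_pool));
 */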

static void *
km_vacache_alloc(struct pool *pp, int flags)
{
	vaddr_t va;
	size_t size;
	struct vm_map *map;
	size = pp->pr_alloc->pa_pagesz;

	map = KM_VACACHE_POOL_TO_MAP(pp);

	va = vm_map_min(map); /* hint */
	if (uvm_map(map, &va, size, NULL, UVM_UNKNOWN_OFFSET, size,
	    UVM_MAPFLAG(UVM_PROT_ALL, UVM_PROT_ALL, UVM_INH_NONE,
	    UVM_ADV_RANDOM, UVM_FLAG_QUANTUM |
	    ((flags & PR_WAITOK) ? UVM_FLAG_WAITVA :
	    UVM_FLAG_TRYLOCK | UVM_FLAG_NOWAIT))))
		return NULL;

	return (void *)va;
}

static void
km_vacache_free(struct pool *pp, void *v)
{
	vaddr_t va = (vaddr_t)v;
	size_t size = pp->pr_alloc->pa_pagesz;
	struct vm_map *map;

	map = KM_VACACHE_POOL_TO_MAP(pp);
	uvm_unmap1(map, va, va + size, UVM_FLAG_QUANTUM|UVM_FLAG_VAONLY);
}

/*
 * km_vacache_init: initialize kva cache.
 */

static void
km_vacache_init(struct vm_map *map, const char *name, size_t size)
{
	struct vm_map_kernel *vmk;
	struct pool *pp;
	struct pool_allocator *pa;

	KASSERT(VM_MAP_IS_KERNEL(map));
	KASSERT(size < (vm_map_max(map) - vm_map_min(map)) / 2); /* sanity */

	vmk = vm_map_to_kernel(map);
	pp = &vmk->vmk_vacache;
	pa = &vmk->vmk_vacache_allocator;
	memset(pa, 0, sizeof(*pa));
	pa->pa_alloc = km_vacache_alloc;
	pa->pa_free = km_vacache_free;
	pa->pa_pagesz = (unsigned int)size;
	pa->pa_backingmap = map;
	pa->pa_backingmapptr = NULL;
	pool_init(pp, PAGE_SIZE, 0, 0, PR_NOTOUCH | PR_RECURSIVE, name, pa);
}

void
uvm_km_vacache_init(struct vm_map *map, const char *name, size_t size)
{

	map->flags |= VM_MAP_VACACHE;
	if (size == 0)
		size = KM_VACACHE_SIZE;
	km_vacache_init(map, name, size);
}

#else /* !defined(PMAP_MAP_POOLPAGE) */

void
uvm_km_vacache_init(struct vm_map *map, const char *name, size_t size)
{

	/* nothing */
}

#endif /* !defined(PMAP_MAP_POOLPAGE) */

void
uvm_km_va_drain(struct vm_map *map, uvm_flag_t flags)
{
	struct vm_map_kernel *vmk = vm_map_to_kernel(map);
	const boolean_t intrsafe = (map->flags & VM_MAP_INTRSAFE) != 0;
	int s = 0xdeadbeaf; /* XXX: gcc */

	if (intrsafe) {
		s = splvm();
	}
	callback_run_roundrobin(&vmk->vmk_reclaim_callback, NULL);
	if (intrsafe) {
		splx(s);
	}
}

/*
 * uvm_km_init: init kernel maps and objects to reflect reality (i.e.
 * KVM already allocated for text, data, bss, and static data structures).
 *
 * => KVM is defined by VM_MIN_KERNEL_ADDRESS/VM_MAX_KERNEL_ADDRESS.
 *    we assume that [vmin -> start] has already been allocated and that
 *    "end" is the end.
 */

void
uvm_km_init(vaddr_t start, vaddr_t end)
{
	vaddr_t base = VM_MIN_KERNEL_ADDRESS;

	/*
	 * next, init kernel memory objects.
	 */

	/* kernel_object: for pageable anonymous kernel memory */
	uao_init();
	uvm.kernel_object = uao_create(VM_MAX_KERNEL_ADDRESS -
	    VM_MIN_KERNEL_ADDRESS, UAO_FLAG_KERNOBJ);

	/*
	 * init the map and reserve any space that might already
	 * have been allocated kernel space before installing.
	 */

	uvm_map_setup_kernel(&kernel_map_store, base, end, VM_MAP_PAGEABLE);
	kernel_map_store.vmk_map.pmap = pmap_kernel();
	if (start != base) {
		int error;
		struct uvm_map_args args;

		error = uvm_map_prepare(&kernel_map_store.vmk_map,
		    base, start - base,
		    NULL, UVM_UNKNOWN_OFFSET, 0,
		    UVM_MAPFLAG(UVM_PROT_ALL, UVM_PROT_ALL, UVM_INH_NONE,
		    UVM_ADV_RANDOM, UVM_FLAG_FIXED), &args);
		if (!error) {
			kernel_first_mapent_store.flags =
			    UVM_MAP_KERNEL | UVM_MAP_FIRST;
			error = uvm_map_enter(&kernel_map_store.vmk_map, &args,
			    &kernel_first_mapent_store);
		}

		if (error)
			panic(
			    "uvm_km_init: could not reserve space for kernel");
	}

	/*
	 * install!
	 */

	kernel_map = &kernel_map_store.vmk_map;
	uvm_km_vacache_init(kernel_map, "kvakernel", 0);
}

/*
 * uvm_km_suballoc: allocate a submap in the kernel map.   once a submap
 * is allocated all references to that area of VM must go through it.  this
 * allows the locking of VAs in kernel_map to be broken up into regions.
 *
 * => if `fixed' is true, *vmin specifies where the region described
 *    by the submap must start
 * => if submap is non NULL we use that as the submap, otherwise we
 *    alloc a new map
 */

struct vm_map *
uvm_km_suballoc(struct vm_map *map, vaddr_t *vmin /* IN/OUT */,
    vaddr_t *vmax /* OUT */, vsize_t size, int flags, boolean_t fixed,
    struct vm_map_kernel *submap)
{
	int mapflags = UVM_FLAG_NOMERGE | (fixed ? UVM_FLAG_FIXED : 0);

	KASSERT(vm_map_pmap(map) == pmap_kernel());

	size = round_page(size);	/* round up to pagesize */
	size += uvm_mapent_overhead(size, flags);

	/*
	 * first allocate a blank spot in the parent map
	 */

	if (uvm_map(map, vmin, size, NULL, UVM_UNKNOWN_OFFSET, 0,
	    UVM_MAPFLAG(UVM_PROT_ALL, UVM_PROT_ALL, UVM_INH_NONE,
	    UVM_ADV_RANDOM, mapflags)) != 0) {
		panic("uvm_km_suballoc: unable to allocate space in parent map");
	}

	/*
	 * set VM bounds (vmin is filled in by uvm_map)
	 */

	*vmax = *vmin + size;

	/*
	 * add references to pmap and create or init the submap
	 */

	pmap_reference(vm_map_pmap(map));
	if (submap == NULL) {
		submap = malloc(sizeof(*submap), M_VMMAP, M_WAITOK);
		if (submap == NULL)
			panic("uvm_km_suballoc: unable to create submap");
	}
	uvm_map_setup_kernel(submap, *vmin, *vmax, flags);
	submap->vmk_map.pmap = vm_map_pmap(map);

	/*
	 * now let uvm_map_submap plug it in...
	 */

	if (uvm_map_submap(map, *vmin, *vmax, &submap->vmk_map) != 0)
		panic("uvm_km_suballoc: submap allocation failed");

	return(&submap->vmk_map);
}
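
/*
 * Example (illustrative): how a boot-time submap such as those listed in
 * the overview comment is carved out of kernel_map.  The map name, size
 * and flags below are assumptions for illustration only.
 *
 *	static struct vm_map_kernel example_map_store;
 *	struct vm_map *example_map;
 *	vaddr_t vmin, vmax;
 *
 *	example_map = uvm_km_suballoc(kernel_map, &vmin, &vmax,
 *	    16 * PAGE_SIZE, VM_MAP_INTRSAFE, FALSE, &example_map_store);
 *
 * afterwards, allocations in the range [vmin, vmax) go through example_map
 * and its lock rather than through kernel_map.
 */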

/*
 * uvm_km_pgremove: remove pages from a kernel uvm_object.
 *
 * => when you unmap a part of anonymous kernel memory you want to toss
 *    the pages right away.    (this gets called from uvm_unmap_...).
 */

void
uvm_km_pgremove(vaddr_t startva, vaddr_t endva)
{
	struct uvm_object * const uobj = uvm.kernel_object;
	const voff_t start = startva - vm_map_min(kernel_map);
	const voff_t end = endva - vm_map_min(kernel_map);
	struct vm_page *pg;
	voff_t curoff, nextoff;
	int swpgonlydelta = 0;
	UVMHIST_FUNC("uvm_km_pgremove"); UVMHIST_CALLED(maphist);

	KASSERT(VM_MIN_KERNEL_ADDRESS <= startva);
	KASSERT(startva < endva);
	KASSERT(endva <= VM_MAX_KERNEL_ADDRESS);

	simple_lock(&uobj->vmobjlock);

	for (curoff = start; curoff < end; curoff = nextoff) {
		nextoff = curoff + PAGE_SIZE;
		pg = uvm_pagelookup(uobj, curoff);
		if (pg != NULL && pg->flags & PG_BUSY) {
			pg->flags |= PG_WANTED;
			UVM_UNLOCK_AND_WAIT(pg, &uobj->vmobjlock, 0,
			    "km_pgrm", 0);
			simple_lock(&uobj->vmobjlock);
			nextoff = curoff;
			continue;
		}

		/*
		 * free the swap slot, then the page.
		 */

		if (pg == NULL &&
		    uao_find_swslot(uobj, curoff >> PAGE_SHIFT) > 0) {
			swpgonlydelta++;
		}
		uao_dropswap(uobj, curoff >> PAGE_SHIFT);
		if (pg != NULL) {
			uvm_lock_pageq();
			uvm_pagefree(pg);
			uvm_unlock_pageq();
		}
	}
	simple_unlock(&uobj->vmobjlock);

	if (swpgonlydelta > 0) {
		simple_lock(&uvm.swap_data_lock);
		KASSERT(uvmexp.swpgonly >= swpgonlydelta);
		uvmexp.swpgonly -= swpgonlydelta;
		simple_unlock(&uvm.swap_data_lock);
	}
}


/*
 * uvm_km_pgremove_intrsafe: like uvm_km_pgremove(), but for non object backed
 * regions.
 *
 * => when you unmap a part of anonymous kernel memory you want to toss
 *    the pages right away.    (this is called from uvm_unmap_...).
 * => none of the pages will ever be busy, and none of them will ever
 *    be on the active or inactive queues (because they have no object).
 */

void
uvm_km_pgremove_intrsafe(vaddr_t start, vaddr_t end)
{
	struct vm_page *pg;
	paddr_t pa;
	UVMHIST_FUNC("uvm_km_pgremove_intrsafe"); UVMHIST_CALLED(maphist);

	KASSERT(VM_MIN_KERNEL_ADDRESS <= start);
	KASSERT(start < end);
	KASSERT(end <= VM_MAX_KERNEL_ADDRESS);

	for (; start < end; start += PAGE_SIZE) {
		if (!pmap_extract(pmap_kernel(), start, &pa)) {
			continue;
		}
		pg = PHYS_TO_VM_PAGE(pa);
		KASSERT(pg);
		KASSERT(pg->uobject == NULL && pg->uanon == NULL);
		uvm_pagefree(pg);
	}
}

#if defined(DEBUG)
void
uvm_km_check_empty(vaddr_t start, vaddr_t end, boolean_t intrsafe)
{
	vaddr_t va;
	paddr_t pa;

	KDASSERT(VM_MIN_KERNEL_ADDRESS <= start);
	KDASSERT(start < end);
	KDASSERT(end <= VM_MAX_KERNEL_ADDRESS);

	for (va = start; va < end; va += PAGE_SIZE) {
		if (pmap_extract(pmap_kernel(), va, &pa)) {
			panic("uvm_km_check_empty: va %p has pa 0x%llx",
			    (void *)va, (long long)pa);
		}
		if (!intrsafe) {
			const struct vm_page *pg;

			simple_lock(&uvm.kernel_object->vmobjlock);
			pg = uvm_pagelookup(uvm.kernel_object,
			    va - vm_map_min(kernel_map));
			simple_unlock(&uvm.kernel_object->vmobjlock);
			if (pg) {
				panic("uvm_km_check_empty: "
				    "has page hashed at %p", (const void *)va);
			}
		}
	}
}
#endif /* defined(DEBUG) */

/*
 * uvm_km_alloc: allocate an area of kernel memory.
 *
 * => NOTE: we can return 0 even if we can wait if there is not enough
 *	free VM space in the map... caller should be prepared to handle
 *	this case.
 * => we return KVA of memory allocated
 */

vaddr_t
uvm_km_alloc(struct vm_map *map, vsize_t size, vsize_t align, uvm_flag_t flags)
{
	vaddr_t kva, loopva;
	vaddr_t offset;
	vsize_t loopsize;
	struct vm_page *pg;
	struct uvm_object *obj;
	int pgaflags;
	UVMHIST_FUNC(__func__); UVMHIST_CALLED(maphist);

	KASSERT(vm_map_pmap(map) == pmap_kernel());
	KASSERT((flags & UVM_KMF_TYPEMASK) == UVM_KMF_WIRED ||
	    (flags & UVM_KMF_TYPEMASK) == UVM_KMF_PAGEABLE ||
	    (flags & UVM_KMF_TYPEMASK) == UVM_KMF_VAONLY);

	/*
	 * setup for call
	 */

	kva = vm_map_min(map);	/* hint */
	size = round_page(size);
	obj = (flags & UVM_KMF_PAGEABLE) ? uvm.kernel_object : NULL;
	UVMHIST_LOG(maphist,"  (map=0x%x, obj=0x%x, size=0x%x, flags=%d)",
	    map, obj, size, flags);

	/*
	 * allocate some virtual space
	 */

	if (__predict_false(uvm_map(map, &kva, size, obj, UVM_UNKNOWN_OFFSET,
	    align, UVM_MAPFLAG(UVM_PROT_ALL, UVM_PROT_ALL, UVM_INH_NONE,
	    UVM_ADV_RANDOM,
	    (flags & (UVM_KMF_TRYLOCK | UVM_KMF_NOWAIT | UVM_KMF_WAITVA))
	    | UVM_FLAG_QUANTUM)) != 0)) {
		UVMHIST_LOG(maphist, "<- done (no VM)",0,0,0,0);
		return(0);
	}

	/*
	 * if all we wanted was VA, return now
	 */

	if (flags & (UVM_KMF_VAONLY | UVM_KMF_PAGEABLE)) {
		UVMHIST_LOG(maphist,"<- done valloc (kva=0x%x)", kva,0,0,0);
		return(kva);
	}

	/*
	 * recover object offset from virtual address
	 */

	offset = kva - vm_map_min(kernel_map);
	UVMHIST_LOG(maphist, "  kva=0x%x, offset=0x%x", kva, offset,0,0);

	/*
	 * now allocate and map in the memory... note that we are the only ones
	 * who should ever get a handle on this area of VM.
	 */

	loopva = kva;
	loopsize = size;

	pgaflags = UVM_PGA_USERESERVE;
	if (flags & UVM_KMF_ZERO)
		pgaflags |= UVM_PGA_ZERO;
	while (loopsize) {
		KASSERT(!pmap_extract(pmap_kernel(), loopva, NULL));

		pg = uvm_pagealloc(NULL, offset, NULL, pgaflags);

		/*
		 * out of memory?
		 */

		if (__predict_false(pg == NULL)) {
			if ((flags & UVM_KMF_NOWAIT) ||
			    ((flags & UVM_KMF_CANFAIL) && !uvm_reclaimable())) {
				/* free everything! */
				uvm_km_free(map, kva, size,
				    flags & UVM_KMF_TYPEMASK);
				return (0);
			} else {
				uvm_wait("km_getwait2");	/* sleep here */
				continue;
			}
		}

		pg->flags &= ~PG_BUSY;	/* new page */
		UVM_PAGE_OWN(pg, NULL);

		/*
		 * map it in
		 */

		pmap_kenter_pa(loopva, VM_PAGE_TO_PHYS(pg),
		    VM_PROT_READ | VM_PROT_WRITE);
		loopva += PAGE_SIZE;
		offset += PAGE_SIZE;
		loopsize -= PAGE_SIZE;
	}

	pmap_update(pmap_kernel());

	UVMHIST_LOG(maphist,"<- done (kva=0x%x)", kva,0,0,0);
	return(kva);
}
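
/*
 * Example (illustrative): the three allocation types accepted by
 * uvm_km_alloc() above.  The maps and sizes named here are examples only.
 *
 *	wired, mapped memory (the common case):
 *		va = uvm_km_alloc(kernel_map, size, 0, UVM_KMF_WIRED);
 *
 *	pageable memory backed by kernel_object, faulted in on demand:
 *		va = uvm_km_alloc(kernel_map, size, 0, UVM_KMF_PAGEABLE);
 *
 *	virtual address space only, for callers that enter their own
 *	mappings (e.g. via pmap_kenter_pa()):
 *		va = uvm_km_alloc(kernel_map, size, 0, UVM_KMF_VAONLY);
 *
 * in all cases the matching uvm_km_free() call must pass the same
 * UVM_KMF_* type flag.
 */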

/*
 * uvm_km_free: free an area of kernel memory
 */

void
uvm_km_free(struct vm_map *map, vaddr_t addr, vsize_t size, uvm_flag_t flags)
{

	KASSERT((flags & UVM_KMF_TYPEMASK) == UVM_KMF_WIRED ||
	    (flags & UVM_KMF_TYPEMASK) == UVM_KMF_PAGEABLE ||
	    (flags & UVM_KMF_TYPEMASK) == UVM_KMF_VAONLY);
	KASSERT((addr & PAGE_MASK) == 0);
	KASSERT(vm_map_pmap(map) == pmap_kernel());

	size = round_page(size);

	if (flags & UVM_KMF_PAGEABLE) {
		uvm_km_pgremove(addr, addr + size);
		pmap_remove(pmap_kernel(), addr, addr + size);
	} else if (flags & UVM_KMF_WIRED) {
		uvm_km_pgremove_intrsafe(addr, addr + size);
		pmap_kremove(addr, size);
	}

	uvm_unmap1(map, addr, addr + size, UVM_FLAG_QUANTUM|UVM_FLAG_VAONLY);
}

/* Sanity; must specify both or none. */
#if (defined(PMAP_MAP_POOLPAGE) || defined(PMAP_UNMAP_POOLPAGE)) && \
    (!defined(PMAP_MAP_POOLPAGE) || !defined(PMAP_UNMAP_POOLPAGE))
#error Must specify MAP and UNMAP together.
#endif

/*
 * uvm_km_alloc_poolpage: allocate a page for the pool allocator
 *
 * => if the pmap specifies an alternate mapping method, we use it.
 */

/* ARGSUSED */
vaddr_t
uvm_km_alloc_poolpage_cache(struct vm_map *map, boolean_t waitok)
{
#if defined(PMAP_MAP_POOLPAGE)
	return uvm_km_alloc_poolpage(map, waitok);
#else
	struct vm_page *pg;
	struct pool *pp = &vm_map_to_kernel(map)->vmk_vacache;
	vaddr_t va;
	int s = 0xdeadbeaf; /* XXX: gcc */
	const boolean_t intrsafe = (map->flags & VM_MAP_INTRSAFE) != 0;

	if ((map->flags & VM_MAP_VACACHE) == 0)
		return uvm_km_alloc_poolpage(map, waitok);

	if (intrsafe)
		s = splvm();
	va = (vaddr_t)pool_get(pp, waitok ? PR_WAITOK : PR_NOWAIT);
	if (intrsafe)
		splx(s);
	if (va == 0)
		return 0;
	KASSERT(!pmap_extract(pmap_kernel(), va, NULL));
again:
	pg = uvm_pagealloc(NULL, 0, NULL, UVM_PGA_USERESERVE);
	if (__predict_false(pg == NULL)) {
		if (waitok) {
			uvm_wait("plpg");
			goto again;
		} else {
			if (intrsafe)
				s = splvm();
			pool_put(pp, (void *)va);
			if (intrsafe)
				splx(s);
			return 0;
		}
	}
	pmap_kenter_pa(va, VM_PAGE_TO_PHYS(pg), VM_PROT_READ|VM_PROT_WRITE);
	pmap_update(pmap_kernel());

	return va;
#endif /* PMAP_MAP_POOLPAGE */
}

vaddr_t
uvm_km_alloc_poolpage(struct vm_map *map, boolean_t waitok)
{
#if defined(PMAP_MAP_POOLPAGE)
	struct vm_page *pg;
	vaddr_t va;

 again:
	pg = uvm_pagealloc(NULL, 0, NULL, UVM_PGA_USERESERVE);
	if (__predict_false(pg == NULL)) {
		if (waitok) {
			uvm_wait("plpg");
			goto again;
		} else
			return (0);
	}
	va = PMAP_MAP_POOLPAGE(VM_PAGE_TO_PHYS(pg));
	if (__predict_false(va == 0))
		uvm_pagefree(pg);
	return (va);
#else
	vaddr_t va;
	int s = 0xdeadbeaf; /* XXX: gcc */
	const boolean_t intrsafe = (map->flags & VM_MAP_INTRSAFE) != 0;

	if (intrsafe)
		s = splvm();
	va = uvm_km_alloc(map, PAGE_SIZE, 0,
	    (waitok ? 0 : UVM_KMF_NOWAIT | UVM_KMF_TRYLOCK) | UVM_KMF_WIRED);
	if (intrsafe)
		splx(s);
	return (va);
#endif /* PMAP_MAP_POOLPAGE */
}

/*
 * uvm_km_free_poolpage: free a previously allocated pool page
 *
 * => if the pmap specifies an alternate unmapping method, we use it.
 */

/* ARGSUSED */
void
uvm_km_free_poolpage_cache(struct vm_map *map, vaddr_t addr)
{
#if defined(PMAP_UNMAP_POOLPAGE)
	uvm_km_free_poolpage(map, addr);
#else
	struct pool *pp;
	int s = 0xdeadbeaf; /* XXX: gcc */
	const boolean_t intrsafe = (map->flags & VM_MAP_INTRSAFE) != 0;

	if ((map->flags & VM_MAP_VACACHE) == 0) {
		uvm_km_free_poolpage(map, addr);
		return;
	}

	KASSERT(pmap_extract(pmap_kernel(), addr, NULL));
	uvm_km_pgremove_intrsafe(addr, addr + PAGE_SIZE);
	pmap_kremove(addr, PAGE_SIZE);
#if defined(DEBUG)
	pmap_update(pmap_kernel());
#endif
	KASSERT(!pmap_extract(pmap_kernel(), addr, NULL));
	pp = &vm_map_to_kernel(map)->vmk_vacache;
	if (intrsafe)
		s = splvm();
	pool_put(pp, (void *)addr);
	if (intrsafe)
		splx(s);
#endif
}

/* ARGSUSED */
void
uvm_km_free_poolpage(struct vm_map *map, vaddr_t addr)
{
#if defined(PMAP_UNMAP_POOLPAGE)
	paddr_t pa;

	pa = PMAP_UNMAP_POOLPAGE(addr);
	uvm_pagefree(PHYS_TO_VM_PAGE(pa));
#else
	int s = 0xdeadbeaf; /* XXX: gcc */
	const boolean_t intrsafe = (map->flags & VM_MAP_INTRSAFE) != 0;

	if (intrsafe)
		s = splvm();
	uvm_km_free(map, addr, PAGE_SIZE, UVM_KMF_WIRED);
	if (intrsafe)
		splx(s);
#endif /* PMAP_UNMAP_POOLPAGE */
}
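
/*
 * Example (illustrative): how a pool back-end can be built on the poolpage
 * interfaces above.  The names "example_pool_page_alloc" and
 * "example_pool_page_free" and the use of kmem_map are assumptions for
 * illustration; they are not part of this file.
 *
 *	static void *
 *	example_pool_page_alloc(struct pool *pp, int flags)
 *	{
 *
 *		return (void *)uvm_km_alloc_poolpage_cache(kmem_map,
 *		    (flags & PR_WAITOK) != 0);
 *	}
 *
 *	static void
 *	example_pool_page_free(struct pool *pp, void *v)
 *	{
 *
 *		uvm_km_free_poolpage_cache(kmem_map, (vaddr_t)v);
 *	}
 *
 * such hooks would be installed in a struct pool_allocator's pa_alloc and
 * pa_free members (cf. km_vacache_init() above).
 */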