uvm_km.c revision 1.113
1/* $NetBSD: uvm_km.c,v 1.113 2012/01/29 12:37:01 para Exp $ */ 2 3/* 4 * Copyright (c) 1997 Charles D. Cranor and Washington University. 5 * Copyright (c) 1991, 1993, The Regents of the University of California. 6 * 7 * All rights reserved. 8 * 9 * This code is derived from software contributed to Berkeley by 10 * The Mach Operating System project at Carnegie-Mellon University. 11 * 12 * Redistribution and use in source and binary forms, with or without 13 * modification, are permitted provided that the following conditions 14 * are met: 15 * 1. Redistributions of source code must retain the above copyright 16 * notice, this list of conditions and the following disclaimer. 17 * 2. Redistributions in binary form must reproduce the above copyright 18 * notice, this list of conditions and the following disclaimer in the 19 * documentation and/or other materials provided with the distribution. 20 * 3. Neither the name of the University nor the names of its contributors 21 * may be used to endorse or promote products derived from this software 22 * without specific prior written permission. 23 * 24 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND 25 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 26 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 27 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE 28 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 29 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS 30 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 31 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT 32 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY 33 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 34 * SUCH DAMAGE. 35 * 36 * @(#)vm_kern.c 8.3 (Berkeley) 1/12/94 37 * from: Id: uvm_km.c,v 1.1.2.14 1998/02/06 05:19:27 chs Exp 38 * 39 * 40 * Copyright (c) 1987, 1990 Carnegie-Mellon University. 41 * All rights reserved. 42 * 43 * Permission to use, copy, modify and distribute this software and 44 * its documentation is hereby granted, provided that both the copyright 45 * notice and this permission notice appear in all copies of the 46 * software, derivative works or modified versions, and any portions 47 * thereof, and that both notices appear in supporting documentation. 48 * 49 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS" 50 * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND 51 * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE. 52 * 53 * Carnegie Mellon requests users of this software to return to 54 * 55 * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU 56 * School of Computer Science 57 * Carnegie Mellon University 58 * Pittsburgh PA 15213-3890 59 * 60 * any improvements or extensions that they make and grant Carnegie the 61 * rights to redistribute these changes. 62 */ 63 64/* 65 * uvm_km.c: handle kernel memory allocation and management 66 */ 67 68/* 69 * overview of kernel memory management: 70 * 71 * the kernel virtual address space is mapped by "kernel_map." kernel_map 72 * starts at VM_MIN_KERNEL_ADDRESS and goes to VM_MAX_KERNEL_ADDRESS. 73 * note that VM_MIN_KERNEL_ADDRESS is equal to vm_map_min(kernel_map). 74 * 75 * the kernel_map has several "submaps." submaps can only appear in 76 * the kernel_map (user processes can't use them). submaps "take over" 77 * the management of a sub-range of the kernel's address space. submaps 78 * are typically allocated at boot time and are never released. kernel 79 * virtual address space that is mapped by a submap is locked by the 80 * submap's lock -- not the kernel_map's lock. 81 * 82 * thus, the useful feature of submaps is that they allow us to break 83 * up the locking and protection of the kernel address space into smaller 84 * chunks. 85 * 86 * the vm system has several standard kernel submaps, including: 87 * pager_map => used to map "buf" structures into kernel space 88 * exec_map => used during exec to handle exec args 89 * etc... 90 * 91 * the kernel allocates its private memory out of special uvm_objects whose 92 * reference count is set to UVM_OBJ_KERN (thus indicating that the objects 93 * are "special" and never die). all kernel objects should be thought of 94 * as large, fixed-sized, sparsely populated uvm_objects. each kernel 95 * object is equal to the size of kernel virtual address space (i.e. the 96 * value "VM_MAX_KERNEL_ADDRESS - VM_MIN_KERNEL_ADDRESS"). 97 * 98 * note that just because a kernel object spans the entire kernel virtual 99 * address space doesn't mean that it has to be mapped into the entire space. 100 * large chunks of a kernel object's space go unused either because 101 * that area of kernel VM is unmapped, or there is some other type of 102 * object mapped into that range (e.g. a vnode). for submap's kernel 103 * objects, the only part of the object that can ever be populated is the 104 * offsets that are managed by the submap. 105 * 106 * note that the "offset" in a kernel object is always the kernel virtual 107 * address minus the VM_MIN_KERNEL_ADDRESS (aka vm_map_min(kernel_map)). 108 * example: 109 * suppose VM_MIN_KERNEL_ADDRESS is 0xf8000000 and the kernel does a 110 * uvm_km_alloc(kernel_map, PAGE_SIZE) [allocate 1 wired down page in the 111 * kernel map]. if uvm_km_alloc returns virtual address 0xf8235000, 112 * then that means that the page at offset 0x235000 in kernel_object is 113 * mapped at 0xf8235000. 114 * 115 * kernel object have one other special property: when the kernel virtual 116 * memory mapping them is unmapped, the backing memory in the object is 117 * freed right away. this is done with the uvm_km_pgremove() function. 118 * this has to be done because there is no backing store for kernel pages 119 * and no need to save them after they are no longer referenced. 120 */ 121 122#include <sys/cdefs.h> 123__KERNEL_RCSID(0, "$NetBSD: uvm_km.c,v 1.113 2012/01/29 12:37:01 para Exp $"); 124 125#include "opt_uvmhist.h" 126 127#include <sys/param.h> 128#include <sys/systm.h> 129#include <sys/proc.h> 130#include <sys/pool.h> 131#include <sys/vmem.h> 132#include <sys/kmem.h> 133 134#include <uvm/uvm.h> 135 136/* 137 * global data structures 138 */ 139 140struct vm_map *kernel_map = NULL; 141 142/* 143 * local data structues 144 */ 145 146static struct vm_map kernel_map_store; 147static struct vm_map_entry kernel_image_mapent_store; 148static struct vm_map_entry kernel_kmem_mapent_store; 149 150vaddr_t kmembase; 151vsize_t kmemsize; 152 153vmem_t *kmem_arena; 154vmem_t *kmem_va_arena; 155 156/* 157 * uvm_km_bootstrap: init kernel maps and objects to reflect reality (i.e. 158 * KVM already allocated for text, data, bss, and static data structures). 159 * 160 * => KVM is defined by VM_MIN_KERNEL_ADDRESS/VM_MAX_KERNEL_ADDRESS. 161 * we assume that [vmin -> start] has already been allocated and that 162 * "end" is the end. 163 */ 164 165void 166uvm_km_bootstrap(vaddr_t start, vaddr_t end) 167{ 168 vaddr_t base = VM_MIN_KERNEL_ADDRESS; 169 170 kmemsize = MIN(((((vsize_t)(end - start)) / 3) * 2), 171 ((((vsize_t)uvmexp.npages) * PAGE_SIZE))); 172 kmemsize = round_page(kmemsize); 173 174 /* 175 * next, init kernel memory objects. 176 */ 177 178 /* kernel_object: for pageable anonymous kernel memory */ 179 uvm_kernel_object = uao_create(VM_MAX_KERNEL_ADDRESS - 180 VM_MIN_KERNEL_ADDRESS, UAO_FLAG_KERNOBJ); 181 182 /* 183 * init the map and reserve any space that might already 184 * have been allocated kernel space before installing. 185 */ 186 187 uvm_map_setup(&kernel_map_store, base, end, VM_MAP_PAGEABLE); 188 kernel_map_store.pmap = pmap_kernel(); 189 if (start != base) { 190 int error; 191 struct uvm_map_args args; 192 193 error = uvm_map_prepare(&kernel_map_store, 194 base, start - base, 195 NULL, UVM_UNKNOWN_OFFSET, 0, 196 UVM_MAPFLAG(UVM_PROT_ALL, UVM_PROT_ALL, UVM_INH_NONE, 197 UVM_ADV_RANDOM, UVM_FLAG_FIXED), &args); 198 if (!error) { 199 kernel_image_mapent_store.flags = 200 UVM_MAP_KERNEL | UVM_MAP_STATIC | UVM_MAP_NOMERGE; 201 error = uvm_map_enter(&kernel_map_store, &args, 202 &kernel_image_mapent_store); 203 } 204 205 if (error) 206 panic( 207 "uvm_km_bootstrap: could not reserve space for kernel"); 208 209 kmembase = args.uma_start + args.uma_size; 210 error = uvm_map_prepare(&kernel_map_store, 211 kmembase, kmemsize, 212 NULL, UVM_UNKNOWN_OFFSET, 0, 213 UVM_MAPFLAG(UVM_PROT_ALL, UVM_PROT_ALL, UVM_INH_NONE, 214 UVM_ADV_RANDOM, UVM_FLAG_FIXED), &args); 215 if (!error) { 216 kernel_kmem_mapent_store.flags = 217 UVM_MAP_KERNEL | UVM_MAP_STATIC | UVM_MAP_NOMERGE; 218 error = uvm_map_enter(&kernel_map_store, &args, 219 &kernel_kmem_mapent_store); 220 } 221 222 if (error) 223 panic( 224 "uvm_km_bootstrap: could not reserve kernel kmem"); 225 } 226 227 /* 228 * install! 229 */ 230 231 kernel_map = &kernel_map_store; 232 233 pool_subsystem_init(); 234 vmem_bootstrap(); 235 236 kmem_arena = vmem_create("kmem", kmembase, kmemsize, PAGE_SIZE, 237 NULL, NULL, NULL, 238 0, VM_NOSLEEP | VM_BOOTSTRAP, IPL_VM); 239 240 vmem_init(kmem_arena); 241 242 kmem_va_arena = vmem_create("kva", 0, 0, PAGE_SIZE, 243 vmem_alloc, vmem_free, kmem_arena, 244 16 * PAGE_SIZE, VM_NOSLEEP | VM_BOOTSTRAP, IPL_VM); 245} 246 247/* 248 * uvm_km_init: init the kernel maps virtual memory caches 249 * and start the pool/kmem allocator. 250 */ 251void 252uvm_km_init(void) 253{ 254 255 kmem_init(); 256 257 kmeminit(); // killme 258} 259 260/* 261 * uvm_km_suballoc: allocate a submap in the kernel map. once a submap 262 * is allocated all references to that area of VM must go through it. this 263 * allows the locking of VAs in kernel_map to be broken up into regions. 264 * 265 * => if `fixed' is true, *vmin specifies where the region described 266 * pager_map => used to map "buf" structures into kernel space 267 * by the submap must start 268 * => if submap is non NULL we use that as the submap, otherwise we 269 * alloc a new map 270 */ 271 272struct vm_map * 273uvm_km_suballoc(struct vm_map *map, vaddr_t *vmin /* IN/OUT */, 274 vaddr_t *vmax /* OUT */, vsize_t size, int flags, bool fixed, 275 struct vm_map *submap) 276{ 277 int mapflags = UVM_FLAG_NOMERGE | (fixed ? UVM_FLAG_FIXED : 0); 278 279 KASSERT(vm_map_pmap(map) == pmap_kernel()); 280 281 size = round_page(size); /* round up to pagesize */ 282 283 /* 284 * first allocate a blank spot in the parent map 285 */ 286 287 if (uvm_map(map, vmin, size, NULL, UVM_UNKNOWN_OFFSET, 0, 288 UVM_MAPFLAG(UVM_PROT_ALL, UVM_PROT_ALL, UVM_INH_NONE, 289 UVM_ADV_RANDOM, mapflags)) != 0) { 290 panic("uvm_km_suballoc: unable to allocate space in parent map"); 291 } 292 293 /* 294 * set VM bounds (vmin is filled in by uvm_map) 295 */ 296 297 *vmax = *vmin + size; 298 299 /* 300 * add references to pmap and create or init the submap 301 */ 302 303 pmap_reference(vm_map_pmap(map)); 304 if (submap == NULL) { 305 submap = kmem_alloc(sizeof(*submap), KM_SLEEP); 306 if (submap == NULL) 307 panic("uvm_km_suballoc: unable to create submap"); 308 } 309 uvm_map_setup(submap, *vmin, *vmax, flags); 310 submap->pmap = vm_map_pmap(map); 311 312 /* 313 * now let uvm_map_submap plug in it... 314 */ 315 316 if (uvm_map_submap(map, *vmin, *vmax, submap) != 0) 317 panic("uvm_km_suballoc: submap allocation failed"); 318 319 return(submap); 320} 321 322/* 323 * uvm_km_pgremove: remove pages from a kernel uvm_object and KVA. 324 */ 325 326void 327uvm_km_pgremove(vaddr_t startva, vaddr_t endva) 328{ 329 struct uvm_object * const uobj = uvm_kernel_object; 330 const voff_t start = startva - vm_map_min(kernel_map); 331 const voff_t end = endva - vm_map_min(kernel_map); 332 struct vm_page *pg; 333 voff_t curoff, nextoff; 334 int swpgonlydelta = 0; 335 UVMHIST_FUNC("uvm_km_pgremove"); UVMHIST_CALLED(maphist); 336 337 KASSERT(VM_MIN_KERNEL_ADDRESS <= startva); 338 KASSERT(startva < endva); 339 KASSERT(endva <= VM_MAX_KERNEL_ADDRESS); 340 341 mutex_enter(uobj->vmobjlock); 342 pmap_remove(pmap_kernel(), startva, endva); 343 for (curoff = start; curoff < end; curoff = nextoff) { 344 nextoff = curoff + PAGE_SIZE; 345 pg = uvm_pagelookup(uobj, curoff); 346 if (pg != NULL && pg->flags & PG_BUSY) { 347 pg->flags |= PG_WANTED; 348 UVM_UNLOCK_AND_WAIT(pg, uobj->vmobjlock, 0, 349 "km_pgrm", 0); 350 mutex_enter(uobj->vmobjlock); 351 nextoff = curoff; 352 continue; 353 } 354 355 /* 356 * free the swap slot, then the page. 357 */ 358 359 if (pg == NULL && 360 uao_find_swslot(uobj, curoff >> PAGE_SHIFT) > 0) { 361 swpgonlydelta++; 362 } 363 uao_dropswap(uobj, curoff >> PAGE_SHIFT); 364 if (pg != NULL) { 365 mutex_enter(&uvm_pageqlock); 366 uvm_pagefree(pg); 367 mutex_exit(&uvm_pageqlock); 368 } 369 } 370 mutex_exit(uobj->vmobjlock); 371 372 if (swpgonlydelta > 0) { 373 mutex_enter(&uvm_swap_data_lock); 374 KASSERT(uvmexp.swpgonly >= swpgonlydelta); 375 uvmexp.swpgonly -= swpgonlydelta; 376 mutex_exit(&uvm_swap_data_lock); 377 } 378} 379 380 381/* 382 * uvm_km_pgremove_intrsafe: like uvm_km_pgremove(), but for non object backed 383 * regions. 384 * 385 * => when you unmap a part of anonymous kernel memory you want to toss 386 * the pages right away. (this is called from uvm_unmap_...). 387 * => none of the pages will ever be busy, and none of them will ever 388 * be on the active or inactive queues (because they have no object). 389 */ 390 391void 392uvm_km_pgremove_intrsafe(struct vm_map *map, vaddr_t start, vaddr_t end) 393{ 394 struct vm_page *pg; 395 paddr_t pa; 396 UVMHIST_FUNC("uvm_km_pgremove_intrsafe"); UVMHIST_CALLED(maphist); 397 398 KASSERT(VM_MAP_IS_KERNEL(map)); 399 KASSERT(vm_map_min(map) <= start); 400 KASSERT(start < end); 401 KASSERT(end <= vm_map_max(map)); 402 403 for (; start < end; start += PAGE_SIZE) { 404 if (!pmap_extract(pmap_kernel(), start, &pa)) { 405 continue; 406 } 407 pg = PHYS_TO_VM_PAGE(pa); 408 KASSERT(pg); 409 KASSERT(pg->uobject == NULL && pg->uanon == NULL); 410 KASSERT((pg->flags & PG_BUSY) == 0); 411 uvm_pagefree(pg); 412 } 413} 414 415#if defined(DEBUG) 416void 417uvm_km_check_empty(struct vm_map *map, vaddr_t start, vaddr_t end) 418{ 419 struct vm_page *pg; 420 vaddr_t va; 421 paddr_t pa; 422 423 KDASSERT(VM_MAP_IS_KERNEL(map)); 424 KDASSERT(vm_map_min(map) <= start); 425 KDASSERT(start < end); 426 KDASSERT(end <= vm_map_max(map)); 427 428 for (va = start; va < end; va += PAGE_SIZE) { 429 if (pmap_extract(pmap_kernel(), va, &pa)) { 430 panic("uvm_km_check_empty: va %p has pa 0x%llx", 431 (void *)va, (long long)pa); 432 } 433 if ((map->flags & VM_MAP_INTRSAFE) == 0) { 434 mutex_enter(uvm_kernel_object->vmobjlock); 435 pg = uvm_pagelookup(uvm_kernel_object, 436 va - vm_map_min(kernel_map)); 437 mutex_exit(uvm_kernel_object->vmobjlock); 438 if (pg) { 439 panic("uvm_km_check_empty: " 440 "has page hashed at %p", (const void *)va); 441 } 442 } 443 } 444} 445#endif /* defined(DEBUG) */ 446 447/* 448 * uvm_km_alloc: allocate an area of kernel memory. 449 * 450 * => NOTE: we can return 0 even if we can wait if there is not enough 451 * free VM space in the map... caller should be prepared to handle 452 * this case. 453 * => we return KVA of memory allocated 454 */ 455 456vaddr_t 457uvm_km_alloc(struct vm_map *map, vsize_t size, vsize_t align, uvm_flag_t flags) 458{ 459 vaddr_t kva, loopva; 460 vaddr_t offset; 461 vsize_t loopsize; 462 struct vm_page *pg; 463 struct uvm_object *obj; 464 int pgaflags; 465 vm_prot_t prot; 466 UVMHIST_FUNC(__func__); UVMHIST_CALLED(maphist); 467 468 KASSERT(vm_map_pmap(map) == pmap_kernel()); 469 KASSERT((flags & UVM_KMF_TYPEMASK) == UVM_KMF_WIRED || 470 (flags & UVM_KMF_TYPEMASK) == UVM_KMF_PAGEABLE || 471 (flags & UVM_KMF_TYPEMASK) == UVM_KMF_VAONLY); 472 KASSERT((flags & UVM_KMF_VAONLY) != 0 || (flags & UVM_KMF_COLORMATCH) == 0); 473 KASSERT((flags & UVM_KMF_COLORMATCH) == 0 || (flags & UVM_KMF_VAONLY) != 0); 474 475 /* 476 * setup for call 477 */ 478 479 kva = vm_map_min(map); /* hint */ 480 size = round_page(size); 481 obj = (flags & UVM_KMF_PAGEABLE) ? uvm_kernel_object : NULL; 482 UVMHIST_LOG(maphist," (map=0x%x, obj=0x%x, size=0x%x, flags=%d)", 483 map, obj, size, flags); 484 485 /* 486 * allocate some virtual space 487 */ 488 489 if (__predict_false(uvm_map(map, &kva, size, obj, UVM_UNKNOWN_OFFSET, 490 align, UVM_MAPFLAG(UVM_PROT_ALL, UVM_PROT_ALL, UVM_INH_NONE, 491 UVM_ADV_RANDOM, 492 (flags & (UVM_KMF_TRYLOCK | UVM_KMF_NOWAIT | UVM_KMF_WAITVA 493 | UVM_KMF_COLORMATCH)))) != 0)) { 494 UVMHIST_LOG(maphist, "<- done (no VM)",0,0,0,0); 495 return(0); 496 } 497 498 /* 499 * if all we wanted was VA, return now 500 */ 501 502 if (flags & (UVM_KMF_VAONLY | UVM_KMF_PAGEABLE)) { 503 UVMHIST_LOG(maphist,"<- done valloc (kva=0x%x)", kva,0,0,0); 504 return(kva); 505 } 506 507 /* 508 * recover object offset from virtual address 509 */ 510 511 offset = kva - vm_map_min(kernel_map); 512 UVMHIST_LOG(maphist, " kva=0x%x, offset=0x%x", kva, offset,0,0); 513 514 /* 515 * now allocate and map in the memory... note that we are the only ones 516 * whom should ever get a handle on this area of VM. 517 */ 518 519 loopva = kva; 520 loopsize = size; 521 522 pgaflags = UVM_FLAG_COLORMATCH; 523 if (flags & UVM_KMF_NOWAIT) 524 pgaflags |= UVM_PGA_USERESERVE; 525 if (flags & UVM_KMF_ZERO) 526 pgaflags |= UVM_PGA_ZERO; 527 prot = VM_PROT_READ | VM_PROT_WRITE; 528 if (flags & UVM_KMF_EXEC) 529 prot |= VM_PROT_EXECUTE; 530 while (loopsize) { 531 KASSERT(!pmap_extract(pmap_kernel(), loopva, NULL)); 532 533 pg = uvm_pagealloc_strat(NULL, offset, NULL, pgaflags, 534#ifdef UVM_KM_VMFREELIST 535 UVM_PGA_STRAT_ONLY, UVM_KM_VMFREELIST 536#else 537 UVM_PGA_STRAT_NORMAL, 0 538#endif 539 ); 540 541 /* 542 * out of memory? 543 */ 544 545 if (__predict_false(pg == NULL)) { 546 if ((flags & UVM_KMF_NOWAIT) || 547 ((flags & UVM_KMF_CANFAIL) && !uvm_reclaimable())) { 548 /* free everything! */ 549 uvm_km_free(map, kva, size, 550 flags & UVM_KMF_TYPEMASK); 551 return (0); 552 } else { 553 uvm_wait("km_getwait2"); /* sleep here */ 554 continue; 555 } 556 } 557 558 pg->flags &= ~PG_BUSY; /* new page */ 559 UVM_PAGE_OWN(pg, NULL); 560 561 /* 562 * map it in 563 */ 564 565 pmap_kenter_pa(loopva, VM_PAGE_TO_PHYS(pg), 566 prot, PMAP_KMPAGE); 567 loopva += PAGE_SIZE; 568 offset += PAGE_SIZE; 569 loopsize -= PAGE_SIZE; 570 } 571 572 pmap_update(pmap_kernel()); 573 574 UVMHIST_LOG(maphist,"<- done (kva=0x%x)", kva,0,0,0); 575 return(kva); 576} 577 578/* 579 * uvm_km_free: free an area of kernel memory 580 */ 581 582void 583uvm_km_free(struct vm_map *map, vaddr_t addr, vsize_t size, uvm_flag_t flags) 584{ 585 586 KASSERT((flags & UVM_KMF_TYPEMASK) == UVM_KMF_WIRED || 587 (flags & UVM_KMF_TYPEMASK) == UVM_KMF_PAGEABLE || 588 (flags & UVM_KMF_TYPEMASK) == UVM_KMF_VAONLY); 589 KASSERT((addr & PAGE_MASK) == 0); 590 KASSERT(vm_map_pmap(map) == pmap_kernel()); 591 592 size = round_page(size); 593 594 if (flags & UVM_KMF_PAGEABLE) { 595 uvm_km_pgremove(addr, addr + size); 596 } else if (flags & UVM_KMF_WIRED) { 597 /* 598 * Note: uvm_km_pgremove_intrsafe() extracts mapping, thus 599 * remove it after. See comment below about KVA visibility. 600 */ 601 uvm_km_pgremove_intrsafe(map, addr, addr + size); 602 pmap_kremove(addr, size); 603 } 604 605 /* 606 * Note: uvm_unmap_remove() calls pmap_update() for us, before 607 * KVA becomes globally available. 608 */ 609 610 uvm_unmap1(map, addr, addr + size, UVM_FLAG_VAONLY); 611} 612 613/* Sanity; must specify both or none. */ 614#if (defined(PMAP_MAP_POOLPAGE) || defined(PMAP_UNMAP_POOLPAGE)) && \ 615 (!defined(PMAP_MAP_POOLPAGE) || !defined(PMAP_UNMAP_POOLPAGE)) 616#error Must specify MAP and UNMAP together. 617#endif 618 619int 620uvm_km_kmem_alloc(vmem_t *vm, vmem_size_t size, vm_flag_t flags, 621 vmem_addr_t *addr) 622{ 623 struct vm_page *pg; 624 vmem_addr_t va; 625 int rc; 626 vaddr_t loopva; 627 vsize_t loopsize; 628 629 size = round_page(size); 630 631#if defined(PMAP_MAP_POOLPAGE) 632 if (size == PAGE_SIZE) { 633again: 634#ifdef PMAP_ALLOC_POOLPAGE 635 pg = PMAP_ALLOC_POOLPAGE((flags & VM_SLEEP) ? 636 0 : UVM_PGA_USERESERVE); 637#else 638 pg = uvm_pagealloc(NULL, 0, NULL, 639 (flags & VM_SLEEP) ? 0 : UVM_PGA_USERESERVE); 640#endif /* PMAP_ALLOC_POOLPAGE */ 641 if (__predict_false(pg == NULL)) { 642 if (flags & VM_SLEEP) { 643 uvm_wait("plpg"); 644 goto again; 645 } 646 } 647 va = PMAP_MAP_POOLPAGE(VM_PAGE_TO_PHYS(pg)); 648 if (__predict_false(va == 0)) { 649 uvm_pagefree(pg); 650 return ENOMEM; 651 } 652 *addr = va; 653 return 0; 654 } 655#endif /* PMAP_MAP_POOLPAGE */ 656 657 rc = vmem_alloc(vm, size, flags, &va); 658 if (rc != 0) 659 return rc; 660 661 loopva = va; 662 loopsize = size; 663 664 while (loopsize) { 665 KASSERT(!pmap_extract(pmap_kernel(), loopva, NULL)); 666 667 pg = uvm_pagealloc(NULL, 0, NULL, 668 (flags & VM_SLEEP) ? 0 : UVM_PGA_USERESERVE); 669 if (__predict_false(pg == NULL)) { 670 if (flags & VM_SLEEP) { 671 uvm_wait("plpg"); 672 continue; 673 } else { 674 uvm_km_pgremove_intrsafe(kernel_map, va, 675 va + size); 676 pmap_kremove(va, size); 677 vmem_free(kmem_va_arena, va, size); 678 return ENOMEM; 679 } 680 } 681 682 pg->flags &= ~PG_BUSY; /* new page */ 683 UVM_PAGE_OWN(pg, NULL); 684 pmap_kenter_pa(loopva, VM_PAGE_TO_PHYS(pg), 685 VM_PROT_READ|VM_PROT_WRITE, PMAP_KMPAGE); 686 687 loopva += PAGE_SIZE; 688 loopsize -= PAGE_SIZE; 689 } 690 pmap_update(pmap_kernel()); 691 692 *addr = va; 693 694 return 0; 695} 696 697void 698uvm_km_kmem_free(vmem_t *vm, vmem_addr_t addr, size_t size) 699{ 700 701 size = round_page(size); 702#if defined(PMAP_UNMAP_POOLPAGE) 703 if (size == PAGE_SIZE) { 704 paddr_t pa; 705 706 pa = PMAP_UNMAP_POOLPAGE(addr); 707 uvm_pagefree(PHYS_TO_VM_PAGE(pa)); 708 return; 709 } 710#endif /* PMAP_UNMAP_POOLPAGE */ 711 uvm_km_pgremove_intrsafe(kernel_map, addr, addr + size); 712 pmap_kremove(addr, size); 713 pmap_update(pmap_kernel()); 714 715 vmem_free(vm, addr, size); 716} 717 718bool 719uvm_km_va_starved_p(void) 720{ 721 vmem_size_t total; 722 vmem_size_t free; 723 724 total = vmem_size(kmem_arena, VMEM_ALLOC|VMEM_FREE); 725 free = vmem_size(kmem_arena, VMEM_FREE); 726 727 return (free < (total / 10)); 728} 729 730