uvm_km.c revision 1.117
1/* $NetBSD: uvm_km.c,v 1.117 2012/02/02 18:59:45 para Exp $ */ 2 3/* 4 * Copyright (c) 1997 Charles D. Cranor and Washington University. 5 * Copyright (c) 1991, 1993, The Regents of the University of California. 6 * 7 * All rights reserved. 8 * 9 * This code is derived from software contributed to Berkeley by 10 * The Mach Operating System project at Carnegie-Mellon University. 11 * 12 * Redistribution and use in source and binary forms, with or without 13 * modification, are permitted provided that the following conditions 14 * are met: 15 * 1. Redistributions of source code must retain the above copyright 16 * notice, this list of conditions and the following disclaimer. 17 * 2. Redistributions in binary form must reproduce the above copyright 18 * notice, this list of conditions and the following disclaimer in the 19 * documentation and/or other materials provided with the distribution. 20 * 3. Neither the name of the University nor the names of its contributors 21 * may be used to endorse or promote products derived from this software 22 * without specific prior written permission. 23 * 24 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND 25 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 26 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 27 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE 28 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 29 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS 30 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 31 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT 32 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY 33 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 34 * SUCH DAMAGE. 35 * 36 * @(#)vm_kern.c 8.3 (Berkeley) 1/12/94 37 * from: Id: uvm_km.c,v 1.1.2.14 1998/02/06 05:19:27 chs Exp 38 * 39 * 40 * Copyright (c) 1987, 1990 Carnegie-Mellon University. 41 * All rights reserved. 42 * 43 * Permission to use, copy, modify and distribute this software and 44 * its documentation is hereby granted, provided that both the copyright 45 * notice and this permission notice appear in all copies of the 46 * software, derivative works or modified versions, and any portions 47 * thereof, and that both notices appear in supporting documentation. 48 * 49 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS" 50 * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND 51 * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE. 52 * 53 * Carnegie Mellon requests users of this software to return to 54 * 55 * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU 56 * School of Computer Science 57 * Carnegie Mellon University 58 * Pittsburgh PA 15213-3890 59 * 60 * any improvements or extensions that they make and grant Carnegie the 61 * rights to redistribute these changes. 62 */ 63 64/* 65 * uvm_km.c: handle kernel memory allocation and management 66 */ 67 68/* 69 * overview of kernel memory management: 70 * 71 * the kernel virtual address space is mapped by "kernel_map." kernel_map 72 * starts at VM_MIN_KERNEL_ADDRESS and goes to VM_MAX_KERNEL_ADDRESS. 73 * note that VM_MIN_KERNEL_ADDRESS is equal to vm_map_min(kernel_map). 74 * 75 * the kernel_map has several "submaps." submaps can only appear in 76 * the kernel_map (user processes can't use them). submaps "take over" 77 * the management of a sub-range of the kernel's address space. submaps 78 * are typically allocated at boot time and are never released. kernel 79 * virtual address space that is mapped by a submap is locked by the 80 * submap's lock -- not the kernel_map's lock. 81 * 82 * thus, the useful feature of submaps is that they allow us to break 83 * up the locking and protection of the kernel address space into smaller 84 * chunks. 85 * 86 * the vm system has several standard kernel submaps, including: 87 * pager_map => used to map "buf" structures into kernel space 88 * exec_map => used during exec to handle exec args 89 * etc... 90 * 91 * the kernel allocates its private memory out of special uvm_objects whose 92 * reference count is set to UVM_OBJ_KERN (thus indicating that the objects 93 * are "special" and never die). all kernel objects should be thought of 94 * as large, fixed-sized, sparsely populated uvm_objects. each kernel 95 * object is equal to the size of kernel virtual address space (i.e. the 96 * value "VM_MAX_KERNEL_ADDRESS - VM_MIN_KERNEL_ADDRESS"). 97 * 98 * note that just because a kernel object spans the entire kernel virtual 99 * address space doesn't mean that it has to be mapped into the entire space. 100 * large chunks of a kernel object's space go unused either because 101 * that area of kernel VM is unmapped, or there is some other type of 102 * object mapped into that range (e.g. a vnode). for submap's kernel 103 * objects, the only part of the object that can ever be populated is the 104 * offsets that are managed by the submap. 105 * 106 * note that the "offset" in a kernel object is always the kernel virtual 107 * address minus the VM_MIN_KERNEL_ADDRESS (aka vm_map_min(kernel_map)). 108 * example: 109 * suppose VM_MIN_KERNEL_ADDRESS is 0xf8000000 and the kernel does a 110 * uvm_km_alloc(kernel_map, PAGE_SIZE) [allocate 1 wired down page in the 111 * kernel map]. if uvm_km_alloc returns virtual address 0xf8235000, 112 * then that means that the page at offset 0x235000 in kernel_object is 113 * mapped at 0xf8235000. 114 * 115 * kernel object have one other special property: when the kernel virtual 116 * memory mapping them is unmapped, the backing memory in the object is 117 * freed right away. this is done with the uvm_km_pgremove() function. 118 * this has to be done because there is no backing store for kernel pages 119 * and no need to save them after they are no longer referenced. 120 */ 121 122#include <sys/cdefs.h> 123__KERNEL_RCSID(0, "$NetBSD: uvm_km.c,v 1.117 2012/02/02 18:59:45 para Exp $"); 124 125#include "opt_uvmhist.h" 126 127#include "opt_kmempages.h" 128 129#ifndef NKMEMPAGES 130#define NKMEMPAGES 0 131#endif 132 133/* 134 * Defaults for lower and upper-bounds for the kmem_arena page count. 135 * Can be overridden by kernel config options. 136 */ 137#ifndef NKMEMPAGES_MIN 138#define NKMEMPAGES_MIN NKMEMPAGES_MIN_DEFAULT 139#endif 140 141#ifndef NKMEMPAGES_MAX 142#define NKMEMPAGES_MAX NKMEMPAGES_MAX_DEFAULT 143#endif 144 145 146#include <sys/param.h> 147#include <sys/systm.h> 148#include <sys/proc.h> 149#include <sys/pool.h> 150#include <sys/vmem.h> 151#include <sys/kmem.h> 152 153#include <uvm/uvm.h> 154 155/* 156 * global data structures 157 */ 158 159struct vm_map *kernel_map = NULL; 160 161/* 162 * local data structues 163 */ 164 165static struct vm_map kernel_map_store; 166static struct vm_map_entry kernel_image_mapent_store; 167static struct vm_map_entry kernel_kmem_mapent_store; 168 169int nkmempages = 0; 170vaddr_t kmembase; 171vsize_t kmemsize; 172 173vmem_t *kmem_arena; 174vmem_t *kmem_va_arena; 175 176/* 177 * kmeminit_nkmempages: calculate the size of kmem_arena. 178 */ 179void 180kmeminit_nkmempages(void) 181{ 182 int npages; 183 184 if (nkmempages != 0) { 185 /* 186 * It's already been set (by us being here before) 187 * bail out now; 188 */ 189 return; 190 } 191 192 npages = physmem; 193 194 if (npages > NKMEMPAGES_MAX) 195 npages = NKMEMPAGES_MAX; 196 197 if (npages < NKMEMPAGES_MIN) 198 npages = NKMEMPAGES_MIN; 199 200 nkmempages = npages; 201} 202 203/* 204 * uvm_km_bootstrap: init kernel maps and objects to reflect reality (i.e. 205 * KVM already allocated for text, data, bss, and static data structures). 206 * 207 * => KVM is defined by VM_MIN_KERNEL_ADDRESS/VM_MAX_KERNEL_ADDRESS. 208 * we assume that [vmin -> start] has already been allocated and that 209 * "end" is the end. 210 */ 211 212void 213uvm_km_bootstrap(vaddr_t start, vaddr_t end) 214{ 215 vaddr_t base = VM_MIN_KERNEL_ADDRESS; 216 217 kmeminit_nkmempages(); 218 kmemsize = nkmempages * PAGE_SIZE; 219 220 /* kmemsize = MIN((((vsize_t)(end - start)) / 3), 221 ((((vsize_t)uvmexp.npages) * PAGE_SIZE) / 2)); 222 kmemsize = round_page(kmemsize); */ 223 224 /* 225 * next, init kernel memory objects. 226 */ 227 228 /* kernel_object: for pageable anonymous kernel memory */ 229 uvm_kernel_object = uao_create(VM_MAX_KERNEL_ADDRESS - 230 VM_MIN_KERNEL_ADDRESS, UAO_FLAG_KERNOBJ); 231 232 /* 233 * init the map and reserve any space that might already 234 * have been allocated kernel space before installing. 235 */ 236 237 uvm_map_setup(&kernel_map_store, base, end, VM_MAP_PAGEABLE); 238 kernel_map_store.pmap = pmap_kernel(); 239 if (start != base) { 240 int error; 241 struct uvm_map_args args; 242 243 error = uvm_map_prepare(&kernel_map_store, 244 base, start - base, 245 NULL, UVM_UNKNOWN_OFFSET, 0, 246 UVM_MAPFLAG(UVM_PROT_ALL, UVM_PROT_ALL, UVM_INH_NONE, 247 UVM_ADV_RANDOM, UVM_FLAG_FIXED), &args); 248 if (!error) { 249 kernel_image_mapent_store.flags = 250 UVM_MAP_KERNEL | UVM_MAP_STATIC | UVM_MAP_NOMERGE; 251 error = uvm_map_enter(&kernel_map_store, &args, 252 &kernel_image_mapent_store); 253 } 254 255 if (error) 256 panic( 257 "uvm_km_bootstrap: could not reserve space for kernel"); 258 259 kmembase = args.uma_start + args.uma_size; 260 error = uvm_map_prepare(&kernel_map_store, 261 kmembase, kmemsize, 262 NULL, UVM_UNKNOWN_OFFSET, 0, 263 UVM_MAPFLAG(UVM_PROT_ALL, UVM_PROT_ALL, UVM_INH_NONE, 264 UVM_ADV_RANDOM, UVM_FLAG_FIXED), &args); 265 if (!error) { 266 kernel_kmem_mapent_store.flags = 267 UVM_MAP_KERNEL | UVM_MAP_STATIC | UVM_MAP_NOMERGE; 268 error = uvm_map_enter(&kernel_map_store, &args, 269 &kernel_kmem_mapent_store); 270 } 271 272 if (error) 273 panic( 274 "uvm_km_bootstrap: could not reserve kernel kmem"); 275 } else { 276 kmembase = base; 277 } 278 279 /* 280 * install! 281 */ 282 283 kernel_map = &kernel_map_store; 284 285 pool_subsystem_init(); 286 vmem_bootstrap(); 287 288 kmem_arena = vmem_create("kmem", kmembase, kmemsize, PAGE_SIZE, 289 NULL, NULL, NULL, 290 0, VM_NOSLEEP | VM_BOOTSTRAP, IPL_VM); 291 292 vmem_init(kmem_arena); 293 294 kmem_va_arena = vmem_create("kva", 0, 0, PAGE_SIZE, 295 vmem_alloc, vmem_free, kmem_arena, 296 16 * PAGE_SIZE, VM_NOSLEEP | VM_BOOTSTRAP, IPL_VM); 297} 298 299/* 300 * uvm_km_init: init the kernel maps virtual memory caches 301 * and start the pool/kmem allocator. 302 */ 303void 304uvm_km_init(void) 305{ 306 307 kmem_init(); 308 309 kmeminit(); // killme 310} 311 312/* 313 * uvm_km_suballoc: allocate a submap in the kernel map. once a submap 314 * is allocated all references to that area of VM must go through it. this 315 * allows the locking of VAs in kernel_map to be broken up into regions. 316 * 317 * => if `fixed' is true, *vmin specifies where the region described 318 * pager_map => used to map "buf" structures into kernel space 319 * by the submap must start 320 * => if submap is non NULL we use that as the submap, otherwise we 321 * alloc a new map 322 */ 323 324struct vm_map * 325uvm_km_suballoc(struct vm_map *map, vaddr_t *vmin /* IN/OUT */, 326 vaddr_t *vmax /* OUT */, vsize_t size, int flags, bool fixed, 327 struct vm_map *submap) 328{ 329 int mapflags = UVM_FLAG_NOMERGE | (fixed ? UVM_FLAG_FIXED : 0); 330 331 KASSERT(vm_map_pmap(map) == pmap_kernel()); 332 333 size = round_page(size); /* round up to pagesize */ 334 335 /* 336 * first allocate a blank spot in the parent map 337 */ 338 339 if (uvm_map(map, vmin, size, NULL, UVM_UNKNOWN_OFFSET, 0, 340 UVM_MAPFLAG(UVM_PROT_ALL, UVM_PROT_ALL, UVM_INH_NONE, 341 UVM_ADV_RANDOM, mapflags)) != 0) { 342 panic("uvm_km_suballoc: unable to allocate space in parent map"); 343 } 344 345 /* 346 * set VM bounds (vmin is filled in by uvm_map) 347 */ 348 349 *vmax = *vmin + size; 350 351 /* 352 * add references to pmap and create or init the submap 353 */ 354 355 pmap_reference(vm_map_pmap(map)); 356 if (submap == NULL) { 357 submap = kmem_alloc(sizeof(*submap), KM_SLEEP); 358 if (submap == NULL) 359 panic("uvm_km_suballoc: unable to create submap"); 360 } 361 uvm_map_setup(submap, *vmin, *vmax, flags); 362 submap->pmap = vm_map_pmap(map); 363 364 /* 365 * now let uvm_map_submap plug in it... 366 */ 367 368 if (uvm_map_submap(map, *vmin, *vmax, submap) != 0) 369 panic("uvm_km_suballoc: submap allocation failed"); 370 371 return(submap); 372} 373 374/* 375 * uvm_km_pgremove: remove pages from a kernel uvm_object and KVA. 376 */ 377 378void 379uvm_km_pgremove(vaddr_t startva, vaddr_t endva) 380{ 381 struct uvm_object * const uobj = uvm_kernel_object; 382 const voff_t start = startva - vm_map_min(kernel_map); 383 const voff_t end = endva - vm_map_min(kernel_map); 384 struct vm_page *pg; 385 voff_t curoff, nextoff; 386 int swpgonlydelta = 0; 387 UVMHIST_FUNC("uvm_km_pgremove"); UVMHIST_CALLED(maphist); 388 389 KASSERT(VM_MIN_KERNEL_ADDRESS <= startva); 390 KASSERT(startva < endva); 391 KASSERT(endva <= VM_MAX_KERNEL_ADDRESS); 392 393 mutex_enter(uobj->vmobjlock); 394 pmap_remove(pmap_kernel(), startva, endva); 395 for (curoff = start; curoff < end; curoff = nextoff) { 396 nextoff = curoff + PAGE_SIZE; 397 pg = uvm_pagelookup(uobj, curoff); 398 if (pg != NULL && pg->flags & PG_BUSY) { 399 pg->flags |= PG_WANTED; 400 UVM_UNLOCK_AND_WAIT(pg, uobj->vmobjlock, 0, 401 "km_pgrm", 0); 402 mutex_enter(uobj->vmobjlock); 403 nextoff = curoff; 404 continue; 405 } 406 407 /* 408 * free the swap slot, then the page. 409 */ 410 411 if (pg == NULL && 412 uao_find_swslot(uobj, curoff >> PAGE_SHIFT) > 0) { 413 swpgonlydelta++; 414 } 415 uao_dropswap(uobj, curoff >> PAGE_SHIFT); 416 if (pg != NULL) { 417 mutex_enter(&uvm_pageqlock); 418 uvm_pagefree(pg); 419 mutex_exit(&uvm_pageqlock); 420 } 421 } 422 mutex_exit(uobj->vmobjlock); 423 424 if (swpgonlydelta > 0) { 425 mutex_enter(&uvm_swap_data_lock); 426 KASSERT(uvmexp.swpgonly >= swpgonlydelta); 427 uvmexp.swpgonly -= swpgonlydelta; 428 mutex_exit(&uvm_swap_data_lock); 429 } 430} 431 432 433/* 434 * uvm_km_pgremove_intrsafe: like uvm_km_pgremove(), but for non object backed 435 * regions. 436 * 437 * => when you unmap a part of anonymous kernel memory you want to toss 438 * the pages right away. (this is called from uvm_unmap_...). 439 * => none of the pages will ever be busy, and none of them will ever 440 * be on the active or inactive queues (because they have no object). 441 */ 442 443void 444uvm_km_pgremove_intrsafe(struct vm_map *map, vaddr_t start, vaddr_t end) 445{ 446 struct vm_page *pg; 447 paddr_t pa; 448 UVMHIST_FUNC("uvm_km_pgremove_intrsafe"); UVMHIST_CALLED(maphist); 449 450 KASSERT(VM_MAP_IS_KERNEL(map)); 451 KASSERT(vm_map_min(map) <= start); 452 KASSERT(start < end); 453 KASSERT(end <= vm_map_max(map)); 454 455 for (; start < end; start += PAGE_SIZE) { 456 if (!pmap_extract(pmap_kernel(), start, &pa)) { 457 continue; 458 } 459 pg = PHYS_TO_VM_PAGE(pa); 460 KASSERT(pg); 461 KASSERT(pg->uobject == NULL && pg->uanon == NULL); 462 KASSERT((pg->flags & PG_BUSY) == 0); 463 uvm_pagefree(pg); 464 } 465} 466 467#if defined(DEBUG) 468void 469uvm_km_check_empty(struct vm_map *map, vaddr_t start, vaddr_t end) 470{ 471 struct vm_page *pg; 472 vaddr_t va; 473 paddr_t pa; 474 475 KDASSERT(VM_MAP_IS_KERNEL(map)); 476 KDASSERT(vm_map_min(map) <= start); 477 KDASSERT(start < end); 478 KDASSERT(end <= vm_map_max(map)); 479 480 for (va = start; va < end; va += PAGE_SIZE) { 481 if (pmap_extract(pmap_kernel(), va, &pa)) { 482 panic("uvm_km_check_empty: va %p has pa 0x%llx", 483 (void *)va, (long long)pa); 484 } 485 if ((map->flags & VM_MAP_INTRSAFE) == 0) { 486 mutex_enter(uvm_kernel_object->vmobjlock); 487 pg = uvm_pagelookup(uvm_kernel_object, 488 va - vm_map_min(kernel_map)); 489 mutex_exit(uvm_kernel_object->vmobjlock); 490 if (pg) { 491 panic("uvm_km_check_empty: " 492 "has page hashed at %p", (const void *)va); 493 } 494 } 495 } 496} 497#endif /* defined(DEBUG) */ 498 499/* 500 * uvm_km_alloc: allocate an area of kernel memory. 501 * 502 * => NOTE: we can return 0 even if we can wait if there is not enough 503 * free VM space in the map... caller should be prepared to handle 504 * this case. 505 * => we return KVA of memory allocated 506 */ 507 508vaddr_t 509uvm_km_alloc(struct vm_map *map, vsize_t size, vsize_t align, uvm_flag_t flags) 510{ 511 vaddr_t kva, loopva; 512 vaddr_t offset; 513 vsize_t loopsize; 514 struct vm_page *pg; 515 struct uvm_object *obj; 516 int pgaflags; 517 vm_prot_t prot; 518 UVMHIST_FUNC(__func__); UVMHIST_CALLED(maphist); 519 520 KASSERT(vm_map_pmap(map) == pmap_kernel()); 521 KASSERT((flags & UVM_KMF_TYPEMASK) == UVM_KMF_WIRED || 522 (flags & UVM_KMF_TYPEMASK) == UVM_KMF_PAGEABLE || 523 (flags & UVM_KMF_TYPEMASK) == UVM_KMF_VAONLY); 524 KASSERT((flags & UVM_KMF_VAONLY) != 0 || (flags & UVM_KMF_COLORMATCH) == 0); 525 KASSERT((flags & UVM_KMF_COLORMATCH) == 0 || (flags & UVM_KMF_VAONLY) != 0); 526 527 /* 528 * setup for call 529 */ 530 531 kva = vm_map_min(map); /* hint */ 532 size = round_page(size); 533 obj = (flags & UVM_KMF_PAGEABLE) ? uvm_kernel_object : NULL; 534 UVMHIST_LOG(maphist," (map=0x%x, obj=0x%x, size=0x%x, flags=%d)", 535 map, obj, size, flags); 536 537 /* 538 * allocate some virtual space 539 */ 540 541 if (__predict_false(uvm_map(map, &kva, size, obj, UVM_UNKNOWN_OFFSET, 542 align, UVM_MAPFLAG(UVM_PROT_ALL, UVM_PROT_ALL, UVM_INH_NONE, 543 UVM_ADV_RANDOM, 544 (flags & (UVM_KMF_TRYLOCK | UVM_KMF_NOWAIT | UVM_KMF_WAITVA 545 | UVM_KMF_COLORMATCH)))) != 0)) { 546 UVMHIST_LOG(maphist, "<- done (no VM)",0,0,0,0); 547 return(0); 548 } 549 550 /* 551 * if all we wanted was VA, return now 552 */ 553 554 if (flags & (UVM_KMF_VAONLY | UVM_KMF_PAGEABLE)) { 555 UVMHIST_LOG(maphist,"<- done valloc (kva=0x%x)", kva,0,0,0); 556 return(kva); 557 } 558 559 /* 560 * recover object offset from virtual address 561 */ 562 563 offset = kva - vm_map_min(kernel_map); 564 UVMHIST_LOG(maphist, " kva=0x%x, offset=0x%x", kva, offset,0,0); 565 566 /* 567 * now allocate and map in the memory... note that we are the only ones 568 * whom should ever get a handle on this area of VM. 569 */ 570 571 loopva = kva; 572 loopsize = size; 573 574 pgaflags = UVM_FLAG_COLORMATCH; 575 if (flags & UVM_KMF_NOWAIT) 576 pgaflags |= UVM_PGA_USERESERVE; 577 if (flags & UVM_KMF_ZERO) 578 pgaflags |= UVM_PGA_ZERO; 579 prot = VM_PROT_READ | VM_PROT_WRITE; 580 if (flags & UVM_KMF_EXEC) 581 prot |= VM_PROT_EXECUTE; 582 while (loopsize) { 583 KASSERTMSG(!pmap_extract(pmap_kernel(), loopva, NULL), 584 "loopva=%#"PRIxVADDR, loopva); 585 586 pg = uvm_pagealloc_strat(NULL, offset, NULL, pgaflags, 587#ifdef UVM_KM_VMFREELIST 588 UVM_PGA_STRAT_ONLY, UVM_KM_VMFREELIST 589#else 590 UVM_PGA_STRAT_NORMAL, 0 591#endif 592 ); 593 594 /* 595 * out of memory? 596 */ 597 598 if (__predict_false(pg == NULL)) { 599 if ((flags & UVM_KMF_NOWAIT) || 600 ((flags & UVM_KMF_CANFAIL) && !uvm_reclaimable())) { 601 /* free everything! */ 602 uvm_km_free(map, kva, size, 603 flags & UVM_KMF_TYPEMASK); 604 return (0); 605 } else { 606 uvm_wait("km_getwait2"); /* sleep here */ 607 continue; 608 } 609 } 610 611 pg->flags &= ~PG_BUSY; /* new page */ 612 UVM_PAGE_OWN(pg, NULL); 613 614 /* 615 * map it in 616 */ 617 618 pmap_kenter_pa(loopva, VM_PAGE_TO_PHYS(pg), 619 prot, PMAP_KMPAGE); 620 loopva += PAGE_SIZE; 621 offset += PAGE_SIZE; 622 loopsize -= PAGE_SIZE; 623 } 624 625 pmap_update(pmap_kernel()); 626 627 UVMHIST_LOG(maphist,"<- done (kva=0x%x)", kva,0,0,0); 628 return(kva); 629} 630 631/* 632 * uvm_km_free: free an area of kernel memory 633 */ 634 635void 636uvm_km_free(struct vm_map *map, vaddr_t addr, vsize_t size, uvm_flag_t flags) 637{ 638 639 KASSERT((flags & UVM_KMF_TYPEMASK) == UVM_KMF_WIRED || 640 (flags & UVM_KMF_TYPEMASK) == UVM_KMF_PAGEABLE || 641 (flags & UVM_KMF_TYPEMASK) == UVM_KMF_VAONLY); 642 KASSERT((addr & PAGE_MASK) == 0); 643 KASSERT(vm_map_pmap(map) == pmap_kernel()); 644 645 size = round_page(size); 646 647 if (flags & UVM_KMF_PAGEABLE) { 648 uvm_km_pgremove(addr, addr + size); 649 } else if (flags & UVM_KMF_WIRED) { 650 /* 651 * Note: uvm_km_pgremove_intrsafe() extracts mapping, thus 652 * remove it after. See comment below about KVA visibility. 653 */ 654 uvm_km_pgremove_intrsafe(map, addr, addr + size); 655 pmap_kremove(addr, size); 656 } 657 658 /* 659 * Note: uvm_unmap_remove() calls pmap_update() for us, before 660 * KVA becomes globally available. 661 */ 662 663 uvm_unmap1(map, addr, addr + size, UVM_FLAG_VAONLY); 664} 665 666/* Sanity; must specify both or none. */ 667#if (defined(PMAP_MAP_POOLPAGE) || defined(PMAP_UNMAP_POOLPAGE)) && \ 668 (!defined(PMAP_MAP_POOLPAGE) || !defined(PMAP_UNMAP_POOLPAGE)) 669#error Must specify MAP and UNMAP together. 670#endif 671 672int 673uvm_km_kmem_alloc(vmem_t *vm, vmem_size_t size, vm_flag_t flags, 674 vmem_addr_t *addr) 675{ 676 struct vm_page *pg; 677 vmem_addr_t va; 678 int rc; 679 vaddr_t loopva; 680 vsize_t loopsize; 681 682 size = round_page(size); 683 684#if defined(PMAP_MAP_POOLPAGE) 685 if (size == PAGE_SIZE) { 686again: 687#ifdef PMAP_ALLOC_POOLPAGE 688 pg = PMAP_ALLOC_POOLPAGE((flags & VM_SLEEP) ? 689 0 : UVM_PGA_USERESERVE); 690#else 691 pg = uvm_pagealloc(NULL, 0, NULL, 692 (flags & VM_SLEEP) ? 0 : UVM_PGA_USERESERVE); 693#endif /* PMAP_ALLOC_POOLPAGE */ 694 if (__predict_false(pg == NULL)) { 695 if (flags & VM_SLEEP) { 696 uvm_wait("plpg"); 697 goto again; 698 } 699 } 700 va = PMAP_MAP_POOLPAGE(VM_PAGE_TO_PHYS(pg)); 701 if (__predict_false(va == 0)) { 702 uvm_pagefree(pg); 703 return ENOMEM; 704 } 705 *addr = va; 706 return 0; 707 } 708#endif /* PMAP_MAP_POOLPAGE */ 709 710 rc = vmem_alloc(vm, size, flags, &va); 711 if (rc != 0) 712 return rc; 713 714 loopva = va; 715 loopsize = size; 716 717 while (loopsize) { 718 KASSERTMSG(!pmap_extract(pmap_kernel(), loopva, NULL), 719 "loopva=%#"PRIxVADDR" loopsize=%#"PRIxVSIZE" vmem=%p", 720 loopva, loopsize, vm); 721 722 pg = uvm_pagealloc(NULL, loopva, NULL, 723 UVM_FLAG_COLORMATCH 724 | ((flags & VM_SLEEP) ? 0 : UVM_PGA_USERESERVE)); 725 if (__predict_false(pg == NULL)) { 726 if (flags & VM_SLEEP) { 727 uvm_wait("plpg"); 728 continue; 729 } else { 730 uvm_km_pgremove_intrsafe(kernel_map, va, 731 va + size); 732 pmap_kremove(va, size); 733 vmem_free(kmem_va_arena, va, size); 734 return ENOMEM; 735 } 736 } 737 738 pg->flags &= ~PG_BUSY; /* new page */ 739 UVM_PAGE_OWN(pg, NULL); 740 pmap_kenter_pa(loopva, VM_PAGE_TO_PHYS(pg), 741 VM_PROT_READ|VM_PROT_WRITE, PMAP_KMPAGE); 742 743 loopva += PAGE_SIZE; 744 loopsize -= PAGE_SIZE; 745 } 746 pmap_update(pmap_kernel()); 747 748 *addr = va; 749 750 return 0; 751} 752 753void 754uvm_km_kmem_free(vmem_t *vm, vmem_addr_t addr, size_t size) 755{ 756 757 size = round_page(size); 758#if defined(PMAP_UNMAP_POOLPAGE) 759 if (size == PAGE_SIZE) { 760 paddr_t pa; 761 762 pa = PMAP_UNMAP_POOLPAGE(addr); 763 uvm_pagefree(PHYS_TO_VM_PAGE(pa)); 764 return; 765 } 766#endif /* PMAP_UNMAP_POOLPAGE */ 767 uvm_km_pgremove_intrsafe(kernel_map, addr, addr + size); 768 pmap_kremove(addr, size); 769 pmap_update(pmap_kernel()); 770 771 vmem_free(vm, addr, size); 772} 773 774bool 775uvm_km_va_starved_p(void) 776{ 777 vmem_size_t total; 778 vmem_size_t free; 779 780 total = vmem_size(kmem_arena, VMEM_ALLOC|VMEM_FREE); 781 free = vmem_size(kmem_arena, VMEM_FREE); 782 783 return (free < (total / 10)); 784} 785 786