/*	$NetBSD: uvm_km.c,v 1.120 2012/02/10 17:35:47 para Exp $	*/

/*
 * Copyright (c) 1997 Charles D. Cranor and Washington University.
 * Copyright (c) 1991, 1993, The Regents of the University of California.
 *
 * All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * The Mach Operating System project at Carnegie-Mellon University.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)vm_kern.c   8.3 (Berkeley) 1/12/94
 * from: Id: uvm_km.c,v 1.1.2.14 1998/02/06 05:19:27 chs Exp
 *
 *
 * Copyright (c) 1987, 1990 Carnegie-Mellon University.
 * All rights reserved.
 *
 * Permission to use, copy, modify and distribute this software and
 * its documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND
 * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie the
 * rights to redistribute these changes.
 */

/*
 * uvm_km.c: handle kernel memory allocation and management
 */

/*
 * overview of kernel memory management:
 *
 * the kernel virtual address space is mapped by "kernel_map."  kernel_map
 * starts at VM_MIN_KERNEL_ADDRESS and goes to VM_MAX_KERNEL_ADDRESS.
 * note that VM_MIN_KERNEL_ADDRESS is equal to vm_map_min(kernel_map).
 *
 * the kernel_map has several "submaps."  submaps can only appear in
 * the kernel_map (user processes can't use them).  submaps "take over"
submaps "take over" 77 * the management of a sub-range of the kernel's address space. submaps 78 * are typically allocated at boot time and are never released. kernel 79 * virtual address space that is mapped by a submap is locked by the 80 * submap's lock -- not the kernel_map's lock. 81 * 82 * thus, the useful feature of submaps is that they allow us to break 83 * up the locking and protection of the kernel address space into smaller 84 * chunks. 85 * 86 * the vm system has several standard kernel submaps, including: 87 * pager_map => used to map "buf" structures into kernel space 88 * exec_map => used during exec to handle exec args 89 * etc... 90 * 91 * the kernel allocates its private memory out of special uvm_objects whose 92 * reference count is set to UVM_OBJ_KERN (thus indicating that the objects 93 * are "special" and never die). all kernel objects should be thought of 94 * as large, fixed-sized, sparsely populated uvm_objects. each kernel 95 * object is equal to the size of kernel virtual address space (i.e. the 96 * value "VM_MAX_KERNEL_ADDRESS - VM_MIN_KERNEL_ADDRESS"). 97 * 98 * note that just because a kernel object spans the entire kernel virtual 99 * address space doesn't mean that it has to be mapped into the entire space. 100 * large chunks of a kernel object's space go unused either because 101 * that area of kernel VM is unmapped, or there is some other type of 102 * object mapped into that range (e.g. a vnode). for submap's kernel 103 * objects, the only part of the object that can ever be populated is the 104 * offsets that are managed by the submap. 105 * 106 * note that the "offset" in a kernel object is always the kernel virtual 107 * address minus the VM_MIN_KERNEL_ADDRESS (aka vm_map_min(kernel_map)). 108 * example: 109 * suppose VM_MIN_KERNEL_ADDRESS is 0xf8000000 and the kernel does a 110 * uvm_km_alloc(kernel_map, PAGE_SIZE) [allocate 1 wired down page in the 111 * kernel map]. if uvm_km_alloc returns virtual address 0xf8235000, 112 * then that means that the page at offset 0x235000 in kernel_object is 113 * mapped at 0xf8235000. 114 * 115 * kernel object have one other special property: when the kernel virtual 116 * memory mapping them is unmapped, the backing memory in the object is 117 * freed right away. this is done with the uvm_km_pgremove() function. 118 * this has to be done because there is no backing store for kernel pages 119 * and no need to save them after they are no longer referenced. 120 */ 121 122#include <sys/cdefs.h> 123__KERNEL_RCSID(0, "$NetBSD: uvm_km.c,v 1.120 2012/02/10 17:35:47 para Exp $"); 124 125#include "opt_uvmhist.h" 126 127#include "opt_kmempages.h" 128 129#ifndef NKMEMPAGES 130#define NKMEMPAGES 0 131#endif 132 133/* 134 * Defaults for lower and upper-bounds for the kmem_arena page count. 135 * Can be overridden by kernel config options. 
#ifndef NKMEMPAGES_MIN
#define NKMEMPAGES_MIN NKMEMPAGES_MIN_DEFAULT
#endif

#ifndef NKMEMPAGES_MAX
#define NKMEMPAGES_MAX NKMEMPAGES_MAX_DEFAULT
#endif


#include <sys/param.h>
#include <sys/systm.h>
#include <sys/proc.h>
#include <sys/pool.h>
#include <sys/vmem.h>
#include <sys/kmem.h>

#include <uvm/uvm.h>

/*
 * global data structures
 */

struct vm_map *kernel_map = NULL;

/*
 * local data structures
 */

static struct vm_map		kernel_map_store;
static struct vm_map_entry	kernel_image_mapent_store;
static struct vm_map_entry	kernel_kmem_mapent_store;

int nkmempages = 0;
vaddr_t kmembase;
vsize_t kmemsize;

vmem_t *kmem_arena;
vmem_t *kmem_va_arena;

/*
 * kmeminit_nkmempages: calculate the size of kmem_arena.
 */
void
kmeminit_nkmempages(void)
{
	int npages;

	if (nkmempages != 0) {
		/*
		 * It's already been set (by us being here before);
		 * bail out now.
		 */
		return;
	}

#if defined(PMAP_MAP_POOLPAGE)
	npages = (physmem / 4);
#else
	npages = (physmem / 3) * 2;
#endif /* defined(PMAP_MAP_POOLPAGE) */

#ifndef NKMEMPAGES_MAX_UNLIMITED
	if (npages > NKMEMPAGES_MAX)
		npages = NKMEMPAGES_MAX;
#endif

	if (npages < NKMEMPAGES_MIN)
		npages = NKMEMPAGES_MIN;

	nkmempages = npages;
}

/*
 * uvm_km_bootstrap: init kernel maps and objects to reflect reality (i.e.
 * KVM already allocated for text, data, bss, and static data structures).
 *
 * => KVM is defined by VM_MIN_KERNEL_ADDRESS/VM_MAX_KERNEL_ADDRESS.
 *    we assume that [vmin -> start] has already been allocated and that
 *    "end" is the end.
 */

void
uvm_km_bootstrap(vaddr_t start, vaddr_t end)
{
	bool kmem_arena_small;
	vaddr_t base = VM_MIN_KERNEL_ADDRESS;
	struct uvm_map_args args;
	int error;

	UVMHIST_FUNC(__func__); UVMHIST_CALLED(maphist);
	UVMHIST_LOG(maphist, "start=%#"PRIxVADDR" end=%#"PRIxVADDR,
	    start, end, 0,0);

	kmeminit_nkmempages();
	kmemsize = (vsize_t)nkmempages * PAGE_SIZE;
	kmem_arena_small = kmemsize < 64 * 1024 * 1024;

	UVMHIST_LOG(maphist, "kmemsize=%#"PRIxVSIZE, kmemsize, 0,0,0);

	/*
	 * next, init kernel memory objects.
	 */

	/* kernel_object: for pageable anonymous kernel memory */
	uvm_kernel_object = uao_create(VM_MAX_KERNEL_ADDRESS -
	    VM_MIN_KERNEL_ADDRESS, UAO_FLAG_KERNOBJ);

	/*
	 * init the map and reserve any space that might already
	 * have been allocated as kernel space before installing it.
	 */
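	/*
	 * The resulting layout, sketched under the usual assumption
	 * that the kernel image sits at the bottom of KVA:
	 *
	 *	base (VM_MIN_KERNEL_ADDRESS)
	 *	  |  kernel image (text/data/bss), reserved below as a
	 *	  |  static map entry when start != base
	 *	start == kmembase
	 *	  |  kmem_arena: kmemsize bytes for the kmem/pool allocators
	 *	kmembase + kmemsize
	 *	  |  remainder of kernel_map, handed out by uvm_map()
	 *	end
	 */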

	uvm_map_setup(&kernel_map_store, base, end, VM_MAP_PAGEABLE);
	kernel_map_store.pmap = pmap_kernel();
	if (start != base) {
		error = uvm_map_prepare(&kernel_map_store,
		    base, start - base,
		    NULL, UVM_UNKNOWN_OFFSET, 0,
		    UVM_MAPFLAG(UVM_PROT_ALL, UVM_PROT_ALL, UVM_INH_NONE,
		    UVM_ADV_RANDOM, UVM_FLAG_FIXED), &args);
		if (!error) {
			kernel_image_mapent_store.flags =
			    UVM_MAP_KERNEL | UVM_MAP_STATIC | UVM_MAP_NOMERGE;
			error = uvm_map_enter(&kernel_map_store, &args,
			    &kernel_image_mapent_store);
		}

		if (error)
			panic(
			    "uvm_km_bootstrap: could not reserve space for kernel");

		kmembase = args.uma_start + args.uma_size;
	} else {
		kmembase = base;
	}

	error = uvm_map_prepare(&kernel_map_store,
	    kmembase, kmemsize,
	    NULL, UVM_UNKNOWN_OFFSET, 0,
	    UVM_MAPFLAG(UVM_PROT_ALL, UVM_PROT_ALL, UVM_INH_NONE,
	    UVM_ADV_RANDOM, UVM_FLAG_FIXED), &args);
	if (!error) {
		kernel_kmem_mapent_store.flags =
		    UVM_MAP_KERNEL | UVM_MAP_STATIC | UVM_MAP_NOMERGE;
		error = uvm_map_enter(&kernel_map_store, &args,
		    &kernel_kmem_mapent_store);
	}

	if (error)
		panic("uvm_km_bootstrap: could not reserve kernel kmem");

	/*
	 * install!
	 */

	kernel_map = &kernel_map_store;

	pool_subsystem_init();
	vmem_bootstrap();

	kmem_arena = vmem_create("kmem", kmembase, kmemsize, PAGE_SIZE,
	    NULL, NULL, NULL,
	    0, VM_NOSLEEP | VM_BOOTSTRAP, IPL_VM);

	vmem_init(kmem_arena);

	UVMHIST_LOG(maphist, "kmem vmem created (base=%#"PRIxVADDR
	    ", size=%#"PRIxVSIZE")", kmembase, kmemsize, 0,0);

	kmem_va_arena = vmem_create("kva", 0, 0, PAGE_SIZE,
	    vmem_alloc, vmem_free, kmem_arena,
	    (kmem_arena_small ? 4 : 8) * PAGE_SIZE,
	    VM_NOSLEEP | VM_BOOTSTRAP, IPL_VM);

	UVMHIST_LOG(maphist, "<- done", 0,0,0,0);
}

/*
 * uvm_km_init: init the kernel maps virtual memory caches
 * and start the pool/kmem allocator.
 */
void
uvm_km_init(void)
{

	kmem_init();

	kmeminit();		// killme
}

/*
 * uvm_km_suballoc: allocate a submap in the kernel map.  once a submap
 * is allocated all references to that area of VM must go through it.  this
 * allows the locking of VAs in kernel_map to be broken up into regions.
 *
 * => if `fixed' is true, *vmin specifies where the region described
 *    by the submap must start
 * => if submap is non NULL we use that as the submap, otherwise we
 *    alloc a new map
 */
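/*
 * An illustrative caller (a sketch, not from this file): this is roughly
 * how a submap such as pager_map is carved out at boot; the bounds
 * pager_sva/pager_eva are hypothetical names here.
 *
 *	vaddr_t pager_sva, pager_eva;
 *	struct vm_map *pager_map;
 *
 *	pager_map = uvm_km_suballoc(kernel_map, &pager_sva, &pager_eva,
 *	    PAGER_MAP_SIZE, 0, false, NULL);
 */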

struct vm_map *
uvm_km_suballoc(struct vm_map *map, vaddr_t *vmin /* IN/OUT */,
    vaddr_t *vmax /* OUT */, vsize_t size, int flags, bool fixed,
    struct vm_map *submap)
{
	int mapflags = UVM_FLAG_NOMERGE | (fixed ? UVM_FLAG_FIXED : 0);
	UVMHIST_FUNC(__func__); UVMHIST_CALLED(maphist);

	KASSERT(vm_map_pmap(map) == pmap_kernel());

	size = round_page(size);	/* round up to pagesize */

	/*
	 * first allocate a blank spot in the parent map
	 */

	if (uvm_map(map, vmin, size, NULL, UVM_UNKNOWN_OFFSET, 0,
	    UVM_MAPFLAG(UVM_PROT_ALL, UVM_PROT_ALL, UVM_INH_NONE,
	    UVM_ADV_RANDOM, mapflags)) != 0) {
		panic("%s: unable to allocate space in parent map", __func__);
	}

	/*
	 * set VM bounds (vmin is filled in by uvm_map)
	 */

	*vmax = *vmin + size;

	/*
	 * add references to pmap and create or init the submap
	 */

	pmap_reference(vm_map_pmap(map));
	if (submap == NULL) {
		submap = kmem_alloc(sizeof(*submap), KM_SLEEP);
		if (submap == NULL)
			panic("uvm_km_suballoc: unable to create submap");
	}
	uvm_map_setup(submap, *vmin, *vmax, flags);
	submap->pmap = vm_map_pmap(map);

	/*
	 * now let uvm_map_submap plug it in...
	 */

	if (uvm_map_submap(map, *vmin, *vmax, submap) != 0)
		panic("uvm_km_suballoc: submap allocation failed");

	return(submap);
}

/*
 * uvm_km_pgremove: remove pages from a kernel uvm_object and KVA.
 */

void
uvm_km_pgremove(vaddr_t startva, vaddr_t endva)
{
	struct uvm_object * const uobj = uvm_kernel_object;
	const voff_t start = startva - vm_map_min(kernel_map);
	const voff_t end = endva - vm_map_min(kernel_map);
	struct vm_page *pg;
	voff_t curoff, nextoff;
	int swpgonlydelta = 0;
	UVMHIST_FUNC(__func__); UVMHIST_CALLED(maphist);

	KASSERT(VM_MIN_KERNEL_ADDRESS <= startva);
	KASSERT(startva < endva);
	KASSERT(endva <= VM_MAX_KERNEL_ADDRESS);

	mutex_enter(uobj->vmobjlock);
	pmap_remove(pmap_kernel(), startva, endva);
	for (curoff = start; curoff < end; curoff = nextoff) {
		nextoff = curoff + PAGE_SIZE;
		pg = uvm_pagelookup(uobj, curoff);
		if (pg != NULL && pg->flags & PG_BUSY) {
			pg->flags |= PG_WANTED;
			UVM_UNLOCK_AND_WAIT(pg, uobj->vmobjlock, 0,
			    "km_pgrm", 0);
			mutex_enter(uobj->vmobjlock);
			nextoff = curoff;
			continue;
		}

		/*
		 * free the swap slot, then the page.
		 */

		if (pg == NULL &&
		    uao_find_swslot(uobj, curoff >> PAGE_SHIFT) > 0) {
			swpgonlydelta++;
		}
		uao_dropswap(uobj, curoff >> PAGE_SHIFT);
		if (pg != NULL) {
			mutex_enter(&uvm_pageqlock);
			uvm_pagefree(pg);
			mutex_exit(&uvm_pageqlock);
		}
	}
	mutex_exit(uobj->vmobjlock);

	if (swpgonlydelta > 0) {
		mutex_enter(&uvm_swap_data_lock);
		KASSERT(uvmexp.swpgonly >= swpgonlydelta);
		uvmexp.swpgonly -= swpgonlydelta;
		mutex_exit(&uvm_swap_data_lock);
	}
}


/*
 * uvm_km_pgremove_intrsafe: like uvm_km_pgremove(), but for non object backed
 * regions.
 *
 * => when you unmap a part of anonymous kernel memory you want to toss
 *    the pages right away.  (this is called from uvm_unmap_...).
 * => none of the pages will ever be busy, and none of them will ever
 *    be on the active or inactive queues (because they have no object).
 */
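/*
 * Note on ordering (descriptive note, not in the original): because these
 * pages belong to no uvm_object, the only way to find them is through the
 * pmap, so the loop below walks the range with pmap_extract() and leaves
 * the mappings in place; the caller removes them afterwards with
 * pmap_kremove() (see uvm_km_free() and uvm_km_kmem_free() below).
 */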

void
uvm_km_pgremove_intrsafe(struct vm_map *map, vaddr_t start, vaddr_t end)
{
	struct vm_page *pg;
	paddr_t pa;
	UVMHIST_FUNC(__func__); UVMHIST_CALLED(maphist);

	KASSERT(VM_MAP_IS_KERNEL(map));
	KASSERT(vm_map_min(map) <= start);
	KASSERT(start < end);
	KASSERT(end <= vm_map_max(map));

	for (; start < end; start += PAGE_SIZE) {
		if (!pmap_extract(pmap_kernel(), start, &pa)) {
			continue;
		}
		pg = PHYS_TO_VM_PAGE(pa);
		KASSERT(pg);
		KASSERT(pg->uobject == NULL && pg->uanon == NULL);
		KASSERT((pg->flags & PG_BUSY) == 0);
		uvm_pagefree(pg);
	}
}

#if defined(DEBUG)
void
uvm_km_check_empty(struct vm_map *map, vaddr_t start, vaddr_t end)
{
	struct vm_page *pg;
	vaddr_t va;
	paddr_t pa;
	UVMHIST_FUNC(__func__); UVMHIST_CALLED(maphist);

	KDASSERT(VM_MAP_IS_KERNEL(map));
	KDASSERT(vm_map_min(map) <= start);
	KDASSERT(start < end);
	KDASSERT(end <= vm_map_max(map));

	for (va = start; va < end; va += PAGE_SIZE) {
		if (pmap_extract(pmap_kernel(), va, &pa)) {
			panic("uvm_km_check_empty: va %p has pa 0x%llx",
			    (void *)va, (long long)pa);
		}
		if ((map->flags & VM_MAP_INTRSAFE) == 0) {
			mutex_enter(uvm_kernel_object->vmobjlock);
			pg = uvm_pagelookup(uvm_kernel_object,
			    va - vm_map_min(kernel_map));
			mutex_exit(uvm_kernel_object->vmobjlock);
			if (pg) {
				panic("uvm_km_check_empty: "
				    "has page hashed at %p",
				    (const void *)va);
			}
		}
	}
}
#endif /* defined(DEBUG) */

/*
 * uvm_km_alloc: allocate an area of kernel memory.
 *
 * => NOTE: we can return 0 even if we can wait if there is not enough
 *	free VM space in the map... caller should be prepared to handle
 *	this case.
 * => we return KVA of memory allocated
 */

vaddr_t
uvm_km_alloc(struct vm_map *map, vsize_t size, vsize_t align, uvm_flag_t flags)
{
	vaddr_t kva, loopva;
	vaddr_t offset;
	vsize_t loopsize;
	struct vm_page *pg;
	struct uvm_object *obj;
	int pgaflags;
	vm_prot_t prot;
	UVMHIST_FUNC(__func__); UVMHIST_CALLED(maphist);

	KASSERT(vm_map_pmap(map) == pmap_kernel());
	KASSERT((flags & UVM_KMF_TYPEMASK) == UVM_KMF_WIRED ||
		(flags & UVM_KMF_TYPEMASK) == UVM_KMF_PAGEABLE ||
		(flags & UVM_KMF_TYPEMASK) == UVM_KMF_VAONLY);
	KASSERT((flags & UVM_KMF_COLORMATCH) == 0 ||
		(flags & UVM_KMF_VAONLY) != 0);

	/*
	 * setup for call
	 */

	kva = vm_map_min(map);	/* hint */
	size = round_page(size);
	obj = (flags & UVM_KMF_PAGEABLE) ? uvm_kernel_object : NULL;
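	/*
	 * (descriptive note) pageable allocations are entered into
	 * uvm_kernel_object so their pages can be looked up and paged
	 * out; wired allocations pass a NULL object and are mapped
	 * below with pmap_kenter_pa(), bypassing the object layer.
	 */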
	UVMHIST_LOG(maphist,"  (map=0x%x, obj=0x%x, size=0x%x, flags=%d)",
	    map, obj, size, flags);

	/*
	 * allocate some virtual space
	 */

	if (__predict_false(uvm_map(map, &kva, size, obj, UVM_UNKNOWN_OFFSET,
	    align, UVM_MAPFLAG(UVM_PROT_ALL, UVM_PROT_ALL, UVM_INH_NONE,
	    UVM_ADV_RANDOM,
	    (flags & (UVM_KMF_TRYLOCK | UVM_KMF_NOWAIT | UVM_KMF_WAITVA
	    | UVM_KMF_COLORMATCH)))) != 0)) {
		UVMHIST_LOG(maphist, "<- done (no VM)",0,0,0,0);
		return(0);
	}

	/*
	 * if all we wanted was VA, return now
	 */

	if (flags & (UVM_KMF_VAONLY | UVM_KMF_PAGEABLE)) {
		UVMHIST_LOG(maphist,"<- done valloc (kva=0x%x)", kva,0,0,0);
		return(kva);
	}

	/*
	 * recover object offset from virtual address
	 */

	offset = kva - vm_map_min(kernel_map);
	UVMHIST_LOG(maphist, "  kva=0x%x, offset=0x%x", kva, offset,0,0);

	/*
	 * now allocate and map in the memory... note that we are the only ones
	 * who should ever get a handle on this area of VM.
	 */

	loopva = kva;
	loopsize = size;

	pgaflags = UVM_FLAG_COLORMATCH;
	if (flags & UVM_KMF_NOWAIT)
		pgaflags |= UVM_PGA_USERESERVE;
	if (flags & UVM_KMF_ZERO)
		pgaflags |= UVM_PGA_ZERO;
	prot = VM_PROT_READ | VM_PROT_WRITE;
	if (flags & UVM_KMF_EXEC)
		prot |= VM_PROT_EXECUTE;
	while (loopsize) {
		KASSERTMSG(!pmap_extract(pmap_kernel(), loopva, NULL),
		    "loopva=%#"PRIxVADDR, loopva);

		pg = uvm_pagealloc_strat(NULL, offset, NULL, pgaflags,
#ifdef UVM_KM_VMFREELIST
		   UVM_PGA_STRAT_ONLY, UVM_KM_VMFREELIST
#else
		   UVM_PGA_STRAT_NORMAL, 0
#endif
		   );

		/*
		 * out of memory?
		 */

		if (__predict_false(pg == NULL)) {
			if ((flags & UVM_KMF_NOWAIT) ||
			    ((flags & UVM_KMF_CANFAIL) &&
			    !uvm_reclaimable())) {
				/* free everything! */
				uvm_km_free(map, kva, size,
				    flags & UVM_KMF_TYPEMASK);
				return (0);
			} else {
				uvm_wait("km_getwait2");	/* sleep here */
				continue;
			}
		}

		pg->flags &= ~PG_BUSY;	/* new page */
		UVM_PAGE_OWN(pg, NULL);

		/*
		 * map it in
		 */

		pmap_kenter_pa(loopva, VM_PAGE_TO_PHYS(pg),
		    prot, PMAP_KMPAGE);
		loopva += PAGE_SIZE;
		offset += PAGE_SIZE;
		loopsize -= PAGE_SIZE;
	}

	pmap_update(pmap_kernel());

	UVMHIST_LOG(maphist,"<- done (kva=0x%x)", kva,0,0,0);
	return(kva);
}

/*
 * uvm_km_free: free an area of kernel memory
 */

void
uvm_km_free(struct vm_map *map, vaddr_t addr, vsize_t size, uvm_flag_t flags)
{
	UVMHIST_FUNC(__func__); UVMHIST_CALLED(maphist);

	KASSERT((flags & UVM_KMF_TYPEMASK) == UVM_KMF_WIRED ||
		(flags & UVM_KMF_TYPEMASK) == UVM_KMF_PAGEABLE ||
		(flags & UVM_KMF_TYPEMASK) == UVM_KMF_VAONLY);
	KASSERT((addr & PAGE_MASK) == 0);
	KASSERT(vm_map_pmap(map) == pmap_kernel());

	size = round_page(size);

	if (flags & UVM_KMF_PAGEABLE) {
		uvm_km_pgremove(addr, addr + size);
	} else if (flags & UVM_KMF_WIRED) {
		/*
		 * Note: uvm_km_pgremove_intrsafe() extracts mapping, thus
		 * remove it after.  See comment below about KVA visibility.
		 */
		uvm_km_pgremove_intrsafe(map, addr, addr + size);
		pmap_kremove(addr, size);
	}

	/*
	 * Note: uvm_unmap_remove() calls pmap_update() for us, before
	 * KVA becomes globally available.
	 */

	uvm_unmap1(map, addr, addr + size, UVM_FLAG_VAONLY);
}
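
/*
 * An illustrative pairing (a sketch, not from this file): allocating and
 * releasing one wired, zeroed page; "buf" is a hypothetical variable.
 *
 *	vaddr_t buf;
 *
 *	buf = uvm_km_alloc(kernel_map, PAGE_SIZE, 0,
 *	    UVM_KMF_WIRED | UVM_KMF_ZERO | UVM_KMF_CANFAIL);
 *	if (buf == 0)
 *		return ENOMEM;
 *	...
 *	uvm_km_free(kernel_map, buf, PAGE_SIZE, UVM_KMF_WIRED);
 */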

/* Sanity; must specify both or none. */
#if (defined(PMAP_MAP_POOLPAGE) || defined(PMAP_UNMAP_POOLPAGE)) && \
    (!defined(PMAP_MAP_POOLPAGE) || !defined(PMAP_UNMAP_POOLPAGE))
#error Must specify MAP and UNMAP together.
#endif

int
uvm_km_kmem_alloc(vmem_t *vm, vmem_size_t size, vm_flag_t flags,
    vmem_addr_t *addr)
{
	struct vm_page *pg;
	vmem_addr_t va;
	int rc;
	vaddr_t loopva;
	vsize_t loopsize;

	size = round_page(size);

#if defined(PMAP_MAP_POOLPAGE)
	if (size == PAGE_SIZE) {
again:
#ifdef PMAP_ALLOC_POOLPAGE
		pg = PMAP_ALLOC_POOLPAGE((flags & VM_SLEEP) ?
		    0 : UVM_PGA_USERESERVE);
#else
		pg = uvm_pagealloc(NULL, 0, NULL,
		    (flags & VM_SLEEP) ? 0 : UVM_PGA_USERESERVE);
#endif /* PMAP_ALLOC_POOLPAGE */
		if (__predict_false(pg == NULL)) {
			if (flags & VM_SLEEP) {
				uvm_wait("plpg");
				goto again;
			}
			/* don't fall through and dereference a NULL page */
			return ENOMEM;
		}
		va = PMAP_MAP_POOLPAGE(VM_PAGE_TO_PHYS(pg));
		if (__predict_false(va == 0)) {
			uvm_pagefree(pg);
			return ENOMEM;
		}
		*addr = va;
		return 0;
	}
#endif /* PMAP_MAP_POOLPAGE */

	rc = vmem_alloc(vm, size, flags, &va);
	if (rc != 0)
		return rc;

	loopva = va;
	loopsize = size;

	while (loopsize) {
		KASSERTMSG(!pmap_extract(pmap_kernel(), loopva, NULL),
		    "loopva=%#"PRIxVADDR" loopsize=%#"PRIxVSIZE" vmem=%p",
		    loopva, loopsize, vm);

		pg = uvm_pagealloc(NULL, loopva, NULL,
		    UVM_FLAG_COLORMATCH
		    | ((flags & VM_SLEEP) ? 0 : UVM_PGA_USERESERVE));
		if (__predict_false(pg == NULL)) {
			if (flags & VM_SLEEP) {
				uvm_wait("plpg");
				continue;
			} else {
				uvm_km_pgremove_intrsafe(kernel_map, va,
				    va + size);
				pmap_kremove(va, size);
				/* free the VA back to the arena it came from */
				vmem_free(vm, va, size);
				return ENOMEM;
			}
		}

		pg->flags &= ~PG_BUSY;	/* new page */
		UVM_PAGE_OWN(pg, NULL);
		pmap_kenter_pa(loopva, VM_PAGE_TO_PHYS(pg),
		    VM_PROT_READ|VM_PROT_WRITE, PMAP_KMPAGE);

		loopva += PAGE_SIZE;
		loopsize -= PAGE_SIZE;
	}
	pmap_update(pmap_kernel());

	*addr = va;

	return 0;
}

void
uvm_km_kmem_free(vmem_t *vm, vmem_addr_t addr, size_t size)
{

	size = round_page(size);
#if defined(PMAP_UNMAP_POOLPAGE)
	if (size == PAGE_SIZE) {
		paddr_t pa;

		pa = PMAP_UNMAP_POOLPAGE(addr);
		uvm_pagefree(PHYS_TO_VM_PAGE(pa));
		return;
	}
#endif /* PMAP_UNMAP_POOLPAGE */
	uvm_km_pgremove_intrsafe(kernel_map, addr, addr + size);
	pmap_kremove(addr, size);
	pmap_update(pmap_kernel());

	vmem_free(vm, addr, size);
}

bool
uvm_km_va_starved_p(void)
{
	vmem_size_t total;
	vmem_size_t free;

	total = vmem_size(kmem_arena, VMEM_ALLOC|VMEM_FREE);
	free = vmem_size(kmem_arena, VMEM_FREE);

	return (free < (total / 10));
}
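
/*
 * An illustrative use of the starvation check (a sketch, not from this
 * file): a subsystem holding freed buffers might drain its caches when
 * less than 10% of the kmem arena remains free; kmem_cache_drain() is a
 * hypothetical callback.
 *
 *	if (uvm_km_va_starved_p())
 *		kmem_cache_drain();
 */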