uvm_km.c revision 1.134
/*	$NetBSD: uvm_km.c,v 1.134 2012/09/04 13:37:41 matt Exp $	*/

/*
 * Copyright (c) 1997 Charles D. Cranor and Washington University.
 * Copyright (c) 1991, 1993, The Regents of the University of California.
 *
 * All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * The Mach Operating System project at Carnegie-Mellon University.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)vm_kern.c   8.3 (Berkeley) 1/12/94
 * from: Id: uvm_km.c,v 1.1.2.14 1998/02/06 05:19:27 chs Exp
 *
 *
 * Copyright (c) 1987, 1990 Carnegie-Mellon University.
 * All rights reserved.
 *
 * Permission to use, copy, modify and distribute this software and
 * its documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND
 * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie the
 * rights to redistribute these changes.
 */

/*
 * uvm_km.c: handle kernel memory allocation and management
 */

/*
 * overview of kernel memory management:
 *
 * the kernel virtual address space is mapped by "kernel_map."  kernel_map
 * starts at VM_MIN_KERNEL_ADDRESS and goes to VM_MAX_KERNEL_ADDRESS.
 * note that VM_MIN_KERNEL_ADDRESS is equal to vm_map_min(kernel_map).
 *
 * the kernel_map has several "submaps."  submaps can only appear in
 * the kernel_map (user processes can't use them).
 * submaps "take over" the management of a sub-range of the kernel's
 * address space.  submaps are typically allocated at boot time and are
 * never released.  kernel virtual address space that is mapped by a
 * submap is locked by the submap's lock -- not the kernel_map's lock.
 *
 * thus, the useful feature of submaps is that they allow us to break
 * up the locking and protection of the kernel address space into smaller
 * chunks.
 *
 * the vm system has several standard kernel submaps/arenas, including:
 *   kmem_arena => used for kmem/pool (memoryallocators(9))
 *   pager_map  => used to map "buf" structures into kernel space
 *   exec_map   => used during exec to handle exec args
 *   etc...
 *
 * The kmem_arena is a "special submap", as it lives in a fixed map entry
 * within the kernel_map and is controlled by vmem(9).
 *
 * the kernel allocates its private memory out of special uvm_objects whose
 * reference count is set to UVM_OBJ_KERN (thus indicating that the objects
 * are "special" and never die).  all kernel objects should be thought of
 * as large, fixed-sized, sparsely populated uvm_objects.  each kernel
 * object is equal to the size of kernel virtual address space (i.e. the
 * value "VM_MAX_KERNEL_ADDRESS - VM_MIN_KERNEL_ADDRESS").
 *
 * note that just because a kernel object spans the entire kernel virtual
 * address space doesn't mean that it has to be mapped into the entire space.
 * large chunks of a kernel object's space go unused either because
 * that area of kernel VM is unmapped, or there is some other type of
 * object mapped into that range (e.g. a vnode).  for submaps' kernel
 * objects, the only part of the object that can ever be populated is the
 * offsets that are managed by the submap.
 *
 * note that the "offset" in a kernel object is always the kernel virtual
 * address minus the VM_MIN_KERNEL_ADDRESS (aka vm_map_min(kernel_map)).
 * example:
 *   suppose VM_MIN_KERNEL_ADDRESS is 0xf8000000 and the kernel does a
 *   uvm_km_alloc(kernel_map, PAGE_SIZE) [allocate 1 wired down page in the
 *   kernel map].  if uvm_km_alloc returns virtual address 0xf8235000,
 *   then that means that the page at offset 0x235000 in kernel_object is
 *   mapped at 0xf8235000.
 *
 * kernel objects have one other special property: when the kernel virtual
 * memory mapping them is unmapped, the backing memory in the object is
 * freed right away.  this is done with the uvm_km_pgremove() function.
 * this has to be done because there is no backing store for kernel pages
 * and no need to save them after they are no longer referenced.
 *
 * Generic arenas:
 *
 * kmem_arena:
 *	Main arena controlling the kernel KVA used by other arenas.
 *
 * kmem_va_arena:
 *	Implements quantum caching in order to speed up allocations and
 *	reduce fragmentation.  The pool(9) (unless created with a custom
 *	meta-data allocator) and kmem(9) subsystems use this arena.
 *
 * Arenas for meta-data allocations are used by vmem(9) and pool(9).
 * These arenas cannot use quantum cache.  However, kmem_va_meta_arena
 * compensates for this by importing larger chunks from kmem_arena.
 *
 * kmem_va_meta_arena:
 *	Space for meta-data.
 *
 * kmem_meta_arena:
 *	Imports from kmem_va_meta_arena.  Allocations from this arena are
 *	backed with pages.
 *
 * Arena stacking:
 *
 *	kmem_arena
 *		kmem_va_arena
 *		kmem_va_meta_arena
 *			kmem_meta_arena
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: uvm_km.c,v 1.134 2012/09/04 13:37:41 matt Exp $");

#include "opt_uvmhist.h"

#include "opt_kmempages.h"

#ifndef NKMEMPAGES
#define NKMEMPAGES 0
#endif

/*
 * Defaults for lower and upper bounds for the kmem_arena page count.
 * Can be overridden by kernel config options.
 */
#ifndef NKMEMPAGES_MIN
#define NKMEMPAGES_MIN NKMEMPAGES_MIN_DEFAULT
#endif

#ifndef NKMEMPAGES_MAX
#define NKMEMPAGES_MAX NKMEMPAGES_MAX_DEFAULT
#endif


#include <sys/param.h>
#include <sys/systm.h>
#include <sys/proc.h>
#include <sys/pool.h>
#include <sys/vmem.h>
#include <sys/kmem.h>

#include <uvm/uvm.h>

/*
 * global data structures
 */

struct vm_map *kernel_map = NULL;

/*
 * local data structures
 */

static struct vm_map		kernel_map_store;
static struct vm_map_entry	kernel_image_mapent_store;
static struct vm_map_entry	kernel_kmem_mapent_store;

int nkmempages = 0;
vaddr_t kmembase;
vsize_t kmemsize;

vmem_t *kmem_arena;
vmem_t *kmem_va_arena;
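
/*
 * Illustrative sketch only (hedged, not part of this file's build): the
 * kva/offset relationship described in the overview comment, expressed in
 * code.  A pageable allocation in kernel_map is backed by uvm_kernel_object
 * at offset kva - vm_map_min(kernel_map).  The function name is hypothetical.
 */
#if 0
static void
example_kernel_object_offset(void)
{
	vaddr_t kva;
	voff_t off;

	kva = uvm_km_alloc(kernel_map, PAGE_SIZE, 0, UVM_KMF_PAGEABLE);
	if (kva != 0) {
		/* pages for this range live at "off" in uvm_kernel_object */
		off = kva - vm_map_min(kernel_map);
		uvm_km_free(kernel_map, kva, PAGE_SIZE, UVM_KMF_PAGEABLE);
	}
}
#endif
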
/*
 * kmeminit_nkmempages: calculate the size of kmem_arena.
 */
void
kmeminit_nkmempages(void)
{
	int npages;

	if (nkmempages != 0) {
		/*
		 * It's already been set (by us being here before);
		 * bail out now.
		 */
		return;
	}

#if defined(PMAP_MAP_POOLPAGE)
	npages = (physmem / 4);
#else
	npages = (physmem / 3) * 2;
#endif /* defined(PMAP_MAP_POOLPAGE) */

#ifndef NKMEMPAGES_MAX_UNLIMITED
	if (npages > NKMEMPAGES_MAX)
		npages = NKMEMPAGES_MAX;
#endif

	if (npages < NKMEMPAGES_MIN)
		npages = NKMEMPAGES_MIN;

	nkmempages = npages;
}

/*
 * uvm_km_bootstrap: init kernel maps and objects to reflect reality (i.e.
 * KVM already allocated for text, data, bss, and static data structures).
 *
 * => KVM is defined by VM_MIN_KERNEL_ADDRESS/VM_MAX_KERNEL_ADDRESS.
 *    we assume that [vmin -> start] has already been allocated and that
 *    "end" is the end.
 */

void
uvm_km_bootstrap(vaddr_t start, vaddr_t end)
{
	bool kmem_arena_small;
	vaddr_t base = VM_MIN_KERNEL_ADDRESS;
	struct uvm_map_args args;
	int error;

	UVMHIST_FUNC(__func__); UVMHIST_CALLED(maphist);
	UVMHIST_LOG(maphist, "start=%"PRIxVADDR" end=%#"PRIxVADDR,
	    start, end, 0,0);

	kmeminit_nkmempages();
	kmemsize = (vsize_t)nkmempages * PAGE_SIZE;
	kmem_arena_small = kmemsize < 64 * 1024 * 1024;

	UVMHIST_LOG(maphist, "kmemsize=%#"PRIxVSIZE, kmemsize, 0,0,0);

	/*
	 * next, init kernel memory objects.
	 */

	/* kernel_object: for pageable anonymous kernel memory */
	uvm_kernel_object = uao_create(VM_MAX_KERNEL_ADDRESS -
	    VM_MIN_KERNEL_ADDRESS, UAO_FLAG_KERNOBJ);

	/*
	 * init the map and reserve any kernel space that has already
	 * been allocated before installing.
	 */

	uvm_map_setup(&kernel_map_store, base, end, VM_MAP_PAGEABLE);
	kernel_map_store.pmap = pmap_kernel();
	if (start != base) {
		error = uvm_map_prepare(&kernel_map_store,
		    base, start - base,
		    NULL, UVM_UNKNOWN_OFFSET, 0,
		    UVM_MAPFLAG(UVM_PROT_ALL, UVM_PROT_ALL, UVM_INH_NONE,
		    UVM_ADV_RANDOM, UVM_FLAG_FIXED), &args);
		if (!error) {
			kernel_image_mapent_store.flags =
			    UVM_MAP_KERNEL | UVM_MAP_STATIC | UVM_MAP_NOMERGE;
			error = uvm_map_enter(&kernel_map_store, &args,
			    &kernel_image_mapent_store);
		}

		if (error)
			panic(
			    "uvm_km_bootstrap: could not reserve space for kernel");

		kmembase = args.uma_start + args.uma_size;
	} else {
		kmembase = base;
	}

	error = uvm_map_prepare(&kernel_map_store,
	    kmembase, kmemsize,
	    NULL, UVM_UNKNOWN_OFFSET, 0,
	    UVM_MAPFLAG(UVM_PROT_ALL, UVM_PROT_ALL, UVM_INH_NONE,
	    UVM_ADV_RANDOM, UVM_FLAG_FIXED), &args);
	if (!error) {
		kernel_kmem_mapent_store.flags =
		    UVM_MAP_KERNEL | UVM_MAP_STATIC | UVM_MAP_NOMERGE;
		error = uvm_map_enter(&kernel_map_store, &args,
		    &kernel_kmem_mapent_store);
	}

	if (error)
		panic("uvm_km_bootstrap: could not reserve kernel kmem");

	/*
	 * install!
	 */

	kernel_map = &kernel_map_store;

	pool_subsystem_init();
	vmem_bootstrap();

	kmem_arena = vmem_create("kmem", kmembase, kmemsize, PAGE_SIZE,
	    NULL, NULL, NULL,
	    0, VM_NOSLEEP | VM_BOOTSTRAP, IPL_VM);

	vmem_init(kmem_arena);

	UVMHIST_LOG(maphist, "kmem vmem created (base=%#"PRIxVADDR
	    ", size=%#"PRIxVSIZE, kmembase, kmemsize, 0,0);

	kmem_va_arena = vmem_create("kva", 0, 0, PAGE_SIZE,
	    vmem_alloc, vmem_free, kmem_arena,
	    (kmem_arena_small ? 4 : 8) * PAGE_SIZE,
	    VM_NOSLEEP | VM_BOOTSTRAP, IPL_VM);

	UVMHIST_LOG(maphist, "<- done", 0,0,0,0);
}
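
/*
 * Illustrative sketch only (hedged, not part of this file's build): how a
 * client could obtain page-backed KVA through the arena stack set up above.
 * uvm_km_kmem_alloc() (below) takes KVA from the given arena -- here
 * kmem_va_arena, which in turn imports from kmem_arena -- and backs it with
 * physical pages.  The function name is hypothetical.
 */
#if 0
static void
example_arena_use(void)
{
	vmem_addr_t va;

	if (uvm_km_kmem_alloc(kmem_va_arena, PAGE_SIZE, VM_SLEEP, &va) == 0) {
		/* ... use (void *)va ... */
		uvm_km_kmem_free(kmem_va_arena, va, PAGE_SIZE);
	}
}
#endif
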
/*
 * uvm_km_init: init the kernel map's virtual memory caches
 * and start the pool/kmem allocator.
 */
void
uvm_km_init(void)
{

	kmem_init();

	kmeminit(); // killme
}

/*
 * uvm_km_suballoc: allocate a submap in the kernel map.   once a submap
 * is allocated all references to that area of VM must go through it.  this
 * allows the locking of VAs in kernel_map to be broken up into regions.
 *
 * => if `fixed' is true, *vmin specifies where the region described
 *    by the submap must start
 * => if submap is non NULL we use that as the submap, otherwise we
 *    alloc a new map
 */

struct vm_map *
uvm_km_suballoc(struct vm_map *map, vaddr_t *vmin /* IN/OUT */,
    vaddr_t *vmax /* OUT */, vsize_t size, int flags, bool fixed,
    struct vm_map *submap)
{
	int mapflags = UVM_FLAG_NOMERGE | (fixed ? UVM_FLAG_FIXED : 0);
	UVMHIST_FUNC(__func__); UVMHIST_CALLED(maphist);

	KASSERT(vm_map_pmap(map) == pmap_kernel());

	size = round_page(size);	/* round up to pagesize */

	/*
	 * first allocate a blank spot in the parent map
	 */

	if (uvm_map(map, vmin, size, NULL, UVM_UNKNOWN_OFFSET, 0,
	    UVM_MAPFLAG(UVM_PROT_ALL, UVM_PROT_ALL, UVM_INH_NONE,
	    UVM_ADV_RANDOM, mapflags)) != 0) {
		panic("%s: unable to allocate space in parent map", __func__);
	}

	/*
	 * set VM bounds (vmin is filled in by uvm_map)
	 */

	*vmax = *vmin + size;

	/*
	 * add references to pmap and create or init the submap
	 */

	pmap_reference(vm_map_pmap(map));
	if (submap == NULL) {
		submap = kmem_alloc(sizeof(*submap), KM_SLEEP);
		if (submap == NULL)
			panic("uvm_km_suballoc: unable to create submap");
	}
	uvm_map_setup(submap, *vmin, *vmax, flags);
	submap->pmap = vm_map_pmap(map);

	/*
	 * now let uvm_map_submap plug it in...
	 */

	if (uvm_map_submap(map, *vmin, *vmax, submap) != 0)
		panic("uvm_km_suballoc: submap allocation failed");

	return(submap);
}
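
/*
 * Illustrative sketch only (hedged, not part of this file's build): a
 * boot-time caller carving a pageable submap out of kernel_map in the style
 * of pager_map or exec_map.  The names example_map/example_sva/example_eva
 * and the 16 MB size are hypothetical.
 */
#if 0
static struct vm_map *example_map;
static vaddr_t example_sva, example_eva;

static void
example_submap_init(void)
{
	/* passing submap == NULL lets uvm_km_suballoc() allocate the map */
	example_map = uvm_km_suballoc(kernel_map, &example_sva, &example_eva,
	    16 * 1024 * 1024, VM_MAP_PAGEABLE, false, NULL);
}
#endif
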
/*
 * uvm_km_pgremove: remove pages from a kernel uvm_object and KVA.
 */

void
uvm_km_pgremove(vaddr_t startva, vaddr_t endva)
{
	struct uvm_object * const uobj = uvm_kernel_object;
	const voff_t start = startva - vm_map_min(kernel_map);
	const voff_t end = endva - vm_map_min(kernel_map);
	struct vm_page *pg;
	voff_t curoff, nextoff;
	int swpgonlydelta = 0;
	UVMHIST_FUNC(__func__); UVMHIST_CALLED(maphist);

	KASSERT(VM_MIN_KERNEL_ADDRESS <= startva);
	KASSERT(startva < endva);
	KASSERT(endva <= VM_MAX_KERNEL_ADDRESS);

	mutex_enter(uobj->vmobjlock);
	pmap_remove(pmap_kernel(), startva, endva);
	for (curoff = start; curoff < end; curoff = nextoff) {
		nextoff = curoff + PAGE_SIZE;
		pg = uvm_pagelookup(uobj, curoff);
		if (pg != NULL && pg->flags & PG_BUSY) {
			pg->flags |= PG_WANTED;
			UVM_UNLOCK_AND_WAIT(pg, uobj->vmobjlock, 0,
			    "km_pgrm", 0);
			mutex_enter(uobj->vmobjlock);
			nextoff = curoff;
			continue;
		}

		/*
		 * free the swap slot, then the page.
		 */

		if (pg == NULL &&
		    uao_find_swslot(uobj, curoff >> PAGE_SHIFT) > 0) {
			swpgonlydelta++;
		}
		uao_dropswap(uobj, curoff >> PAGE_SHIFT);
		if (pg != NULL) {
			mutex_enter(&uvm_pageqlock);
			uvm_pagefree(pg);
			mutex_exit(&uvm_pageqlock);
		}
	}
	mutex_exit(uobj->vmobjlock);

	if (swpgonlydelta > 0) {
		mutex_enter(&uvm_swap_data_lock);
		KASSERT(uvmexp.swpgonly >= swpgonlydelta);
		uvmexp.swpgonly -= swpgonlydelta;
		mutex_exit(&uvm_swap_data_lock);
	}
}


/*
 * uvm_km_pgremove_intrsafe: like uvm_km_pgremove(), but for non object backed
 * regions.
 *
 * => when you unmap a part of anonymous kernel memory you want to toss
 *    the pages right away.    (this is called from uvm_unmap_...).
 * => none of the pages will ever be busy, and none of them will ever
 *    be on the active or inactive queues (because they have no object).
 */

void
uvm_km_pgremove_intrsafe(struct vm_map *map, vaddr_t start, vaddr_t end)
{
#define __PGRM_BATCH 16
	struct vm_page *pg;
	paddr_t pa[__PGRM_BATCH];
	int npgrm, i;
	vaddr_t va, batch_vastart;

	UVMHIST_FUNC(__func__); UVMHIST_CALLED(maphist);

	KASSERT(VM_MAP_IS_KERNEL(map));
	KASSERTMSG(vm_map_min(map) <= start,
	    "vm_map_min(map) [%#"PRIxVADDR"] <= start [%#"PRIxVADDR"]"
	    " (size=%#"PRIxVSIZE")",
	    vm_map_min(map), start, end - start);
	KASSERT(start < end);
	KASSERT(end <= vm_map_max(map));

	for (va = start; va < end;) {
		batch_vastart = va;
		/* create a batch of at most __PGRM_BATCH pages to free */
		for (i = 0;
		     i < __PGRM_BATCH && va < end;
		     va += PAGE_SIZE) {
			if (!pmap_extract(pmap_kernel(), va, &pa[i])) {
				continue;
			}
			i++;
		}
		npgrm = i;
		/* now remove the mappings */
		pmap_kremove(batch_vastart, va - batch_vastart);
		/* and free the pages */
		for (i = 0; i < npgrm; i++) {
			pg = PHYS_TO_VM_PAGE(pa[i]);
			KASSERT(pg);
			KASSERT(pg->uobject == NULL && pg->uanon == NULL);
			KASSERT((pg->flags & PG_BUSY) == 0);
			uvm_pagefree(pg);
		}
	}
#undef __PGRM_BATCH
}

#if defined(DEBUG)
void
uvm_km_check_empty(struct vm_map *map, vaddr_t start, vaddr_t end)
{
	struct vm_page *pg;
	vaddr_t va;
	paddr_t pa;
	UVMHIST_FUNC(__func__); UVMHIST_CALLED(maphist);

	KDASSERT(VM_MAP_IS_KERNEL(map));
	KDASSERT(vm_map_min(map) <= start);
	KDASSERT(start < end);
	KDASSERT(end <= vm_map_max(map));

	for (va = start; va < end; va += PAGE_SIZE) {
		if (pmap_extract(pmap_kernel(), va, &pa)) {
			panic("uvm_km_check_empty: va %p has pa 0x%llx",
			    (void *)va, (long long)pa);
		}
		mutex_enter(uvm_kernel_object->vmobjlock);
		pg = uvm_pagelookup(uvm_kernel_object,
		    va - vm_map_min(kernel_map));
		mutex_exit(uvm_kernel_object->vmobjlock);
		if (pg) {
			panic("uvm_km_check_empty: "
			    "has page hashed at %p", (const void *)va);
		}
	}
}
#endif /* defined(DEBUG) */

/*
 * uvm_km_alloc: allocate an area of kernel memory.
 *
 * => NOTE: we can return 0 even if we can wait if there is not enough
 *    free VM space in the map... caller should be prepared to handle
 *    this case.
 * => we return KVA of memory allocated
 */

vaddr_t
uvm_km_alloc(struct vm_map *map, vsize_t size, vsize_t align, uvm_flag_t flags)
{
	vaddr_t kva, loopva;
	vaddr_t offset;
	vsize_t loopsize;
	struct vm_page *pg;
	struct uvm_object *obj;
	int pgaflags;
	vm_prot_t prot;
	UVMHIST_FUNC(__func__); UVMHIST_CALLED(maphist);

	KASSERT(vm_map_pmap(map) == pmap_kernel());
	KASSERT((flags & UVM_KMF_TYPEMASK) == UVM_KMF_WIRED ||
	    (flags & UVM_KMF_TYPEMASK) == UVM_KMF_PAGEABLE ||
	    (flags & UVM_KMF_TYPEMASK) == UVM_KMF_VAONLY);
	KASSERT((flags & UVM_KMF_VAONLY) != 0 || (flags & UVM_KMF_COLORMATCH) == 0);
	KASSERT((flags & UVM_KMF_COLORMATCH) == 0 || (flags & UVM_KMF_VAONLY) != 0);

	/*
	 * setup for call
	 */

	kva = vm_map_min(map);	/* hint */
	size = round_page(size);
	obj = (flags & UVM_KMF_PAGEABLE) ? uvm_kernel_object : NULL;
	UVMHIST_LOG(maphist," (map=0x%x, obj=0x%x, size=0x%x, flags=%d)",
	    map, obj, size, flags);

	/*
	 * allocate some virtual space
	 */

	if (__predict_false(uvm_map(map, &kva, size, obj, UVM_UNKNOWN_OFFSET,
	    align, UVM_MAPFLAG(UVM_PROT_ALL, UVM_PROT_ALL, UVM_INH_NONE,
	    UVM_ADV_RANDOM,
	    (flags & (UVM_KMF_TRYLOCK | UVM_KMF_NOWAIT | UVM_KMF_WAITVA
	    | UVM_KMF_COLORMATCH)))) != 0)) {
		UVMHIST_LOG(maphist, "<- done (no VM)",0,0,0,0);
		return(0);
	}

	/*
	 * if all we wanted was VA, return now
	 */

	if (flags & (UVM_KMF_VAONLY | UVM_KMF_PAGEABLE)) {
		UVMHIST_LOG(maphist,"<- done valloc (kva=0x%x)", kva,0,0,0);
		return(kva);
	}

	/*
	 * recover object offset from virtual address
	 */

	offset = kva - vm_map_min(kernel_map);
	UVMHIST_LOG(maphist, " kva=0x%x, offset=0x%x", kva, offset,0,0);

	/*
	 * now allocate and map in the memory... note that we are the only ones
	 * who should ever get a handle on this area of VM.
	 */

	loopva = kva;
	loopsize = size;

	pgaflags = UVM_FLAG_COLORMATCH;
	if (flags & UVM_KMF_NOWAIT)
		pgaflags |= UVM_PGA_USERESERVE;
	if (flags & UVM_KMF_ZERO)
		pgaflags |= UVM_PGA_ZERO;
	prot = VM_PROT_READ | VM_PROT_WRITE;
	if (flags & UVM_KMF_EXEC)
		prot |= VM_PROT_EXECUTE;
	while (loopsize) {
		KASSERTMSG(!pmap_extract(pmap_kernel(), loopva, NULL),
		    "loopva=%#"PRIxVADDR, loopva);

		pg = uvm_pagealloc_strat(NULL, offset, NULL, pgaflags,
#ifdef UVM_KM_VMFREELIST
		    UVM_PGA_STRAT_ONLY, UVM_KM_VMFREELIST
#else
		    UVM_PGA_STRAT_NORMAL, 0
#endif
		    );

		/*
		 * out of memory?
		 */

		if (__predict_false(pg == NULL)) {
			if ((flags & UVM_KMF_NOWAIT) ||
			    ((flags & UVM_KMF_CANFAIL) && !uvm_reclaimable())) {
				/* free everything! */
				uvm_km_free(map, kva, size,
				    flags & UVM_KMF_TYPEMASK);
				return (0);
			} else {
				uvm_wait("km_getwait2");	/* sleep here */
				continue;
			}
		}

		pg->flags &= ~PG_BUSY;	/* new page */
		UVM_PAGE_OWN(pg, NULL);

		/*
		 * map it in
		 */

		pmap_kenter_pa(loopva, VM_PAGE_TO_PHYS(pg),
		    prot, PMAP_KMPAGE);
		loopva += PAGE_SIZE;
		offset += PAGE_SIZE;
		loopsize -= PAGE_SIZE;
	}

	pmap_update(pmap_kernel());

	UVMHIST_LOG(maphist,"<- done (kva=0x%x)", kva,0,0,0);
	return(kva);
}
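
/*
 * Illustrative sketch only (hedged, not part of this file's build):
 * allocating and releasing a wired, zeroed buffer.  As the comment above
 * notes, uvm_km_alloc() can return 0 when no KVA (or, with UVM_KMF_CANFAIL,
 * no memory) is available, so the caller must check for that.  The free must
 * pass the same type flag that was used for the allocation.  The function
 * names are hypothetical.
 */
#if 0
static void *
example_wired_alloc(vsize_t len)
{
	vaddr_t kva;

	kva = uvm_km_alloc(kernel_map, round_page(len), 0,
	    UVM_KMF_WIRED | UVM_KMF_ZERO | UVM_KMF_CANFAIL);
	if (kva == 0)
		return NULL;
	return (void *)kva;
}

static void
example_wired_free(void *p, vsize_t len)
{
	uvm_km_free(kernel_map, (vaddr_t)p, round_page(len), UVM_KMF_WIRED);
}
#endif
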
/*
 * uvm_km_free: free an area of kernel memory
 */

void
uvm_km_free(struct vm_map *map, vaddr_t addr, vsize_t size, uvm_flag_t flags)
{
	UVMHIST_FUNC(__func__); UVMHIST_CALLED(maphist);

	KASSERT((flags & UVM_KMF_TYPEMASK) == UVM_KMF_WIRED ||
	    (flags & UVM_KMF_TYPEMASK) == UVM_KMF_PAGEABLE ||
	    (flags & UVM_KMF_TYPEMASK) == UVM_KMF_VAONLY);
	KASSERT((addr & PAGE_MASK) == 0);
	KASSERT(vm_map_pmap(map) == pmap_kernel());

	size = round_page(size);

	if (flags & UVM_KMF_PAGEABLE) {
		uvm_km_pgremove(addr, addr + size);
	} else if (flags & UVM_KMF_WIRED) {
		/*
		 * Note: uvm_km_pgremove_intrsafe() extracts mapping, thus
		 * remove it after.  See comment below about KVA visibility.
		 */
		uvm_km_pgremove_intrsafe(map, addr, addr + size);
	}

	/*
	 * Note: uvm_unmap_remove() calls pmap_update() for us, before
	 * KVA becomes globally available.
	 */

	uvm_unmap1(map, addr, addr + size, UVM_FLAG_VAONLY);
}

/* Sanity; must specify both or none. */
#if (defined(PMAP_MAP_POOLPAGE) || defined(PMAP_UNMAP_POOLPAGE)) && \
    (!defined(PMAP_MAP_POOLPAGE) || !defined(PMAP_UNMAP_POOLPAGE))
#error Must specify MAP and UNMAP together.
#endif

int
uvm_km_kmem_alloc(vmem_t *vm, vmem_size_t size, vm_flag_t flags,
    vmem_addr_t *addr)
{
	struct vm_page *pg;
	vmem_addr_t va;
	int rc;
	vaddr_t loopva;
	vsize_t loopsize;

	size = round_page(size);

#if defined(PMAP_MAP_POOLPAGE)
	if (size == PAGE_SIZE) {
again:
#ifdef PMAP_ALLOC_POOLPAGE
		pg = PMAP_ALLOC_POOLPAGE((flags & VM_SLEEP) ?
		    0 : UVM_PGA_USERESERVE);
#else
		pg = uvm_pagealloc(NULL, 0, NULL,
		    (flags & VM_SLEEP) ? 0 : UVM_PGA_USERESERVE);
#endif /* PMAP_ALLOC_POOLPAGE */
		if (__predict_false(pg == NULL)) {
			if (flags & VM_SLEEP) {
				uvm_wait("plpg");
				goto again;
			}
			return ENOMEM;
		}
		va = PMAP_MAP_POOLPAGE(VM_PAGE_TO_PHYS(pg));
		if (__predict_false(va == 0)) {
			uvm_pagefree(pg);
			return ENOMEM;
		}
		*addr = va;
		return 0;
	}
#endif /* PMAP_MAP_POOLPAGE */

	rc = vmem_alloc(vm, size, flags, &va);
	if (rc != 0)
		return rc;

#ifdef PMAP_GROWKERNEL
	/*
	 * These VA allocations happen independently of uvm_map, so if
	 * this allocation extends beyond the current limit, then allocate
	 * more resources for it.  This can only happen while the kmem_map
	 * is the only map entry in the kernel_map, because as soon as
	 * another map entry is created, uvm_map_prepare will set
	 * uvm_maxkaddr to an address beyond the kmem_map.
	 */
	if (uvm_maxkaddr < va + size) {
		uvm_maxkaddr = pmap_growkernel(va + size);
		KASSERTMSG(uvm_maxkaddr >= va + size,
		    "%#"PRIxVADDR" %#"PRIxPTR" %#zx",
		    uvm_maxkaddr, va, size);
	}
#endif

	loopva = va;
	loopsize = size;

	while (loopsize) {
#ifdef DIAGNOSTIC
		paddr_t pa;
#endif
		KASSERTMSG(!pmap_extract(pmap_kernel(), loopva, &pa),
		    "loopva=%#"PRIxVADDR" loopsize=%#"PRIxVSIZE
		    " pa=%#"PRIxPADDR" vmem=%p",
		    loopva, loopsize, pa, vm);

		pg = uvm_pagealloc(NULL, loopva, NULL,
		    UVM_FLAG_COLORMATCH
		    | ((flags & VM_SLEEP) ? 0 : UVM_PGA_USERESERVE));
		if (__predict_false(pg == NULL)) {
			if (flags & VM_SLEEP) {
				uvm_wait("plpg");
				continue;
			} else {
				uvm_km_pgremove_intrsafe(kernel_map, va,
				    va + size);
				vmem_free(vm, va, size);
				return ENOMEM;
			}
		}

		pg->flags &= ~PG_BUSY;	/* new page */
		UVM_PAGE_OWN(pg, NULL);
		pmap_kenter_pa(loopva, VM_PAGE_TO_PHYS(pg),
		    VM_PROT_READ|VM_PROT_WRITE, PMAP_KMPAGE);

		loopva += PAGE_SIZE;
		loopsize -= PAGE_SIZE;
	}
	pmap_update(pmap_kernel());

	*addr = va;

	return 0;
}

void
uvm_km_kmem_free(vmem_t *vm, vmem_addr_t addr, size_t size)
{

	size = round_page(size);
#if defined(PMAP_UNMAP_POOLPAGE)
	if (size == PAGE_SIZE) {
		paddr_t pa;

		pa = PMAP_UNMAP_POOLPAGE(addr);
		uvm_pagefree(PHYS_TO_VM_PAGE(pa));
		return;
	}
#endif /* PMAP_UNMAP_POOLPAGE */
	uvm_km_pgremove_intrsafe(kernel_map, addr, addr + size);
	pmap_update(pmap_kernel());

	vmem_free(vm, addr, size);
}

bool
uvm_km_va_starved_p(void)
{
	vmem_size_t total;
	vmem_size_t free;

	total = vmem_size(kmem_arena, VMEM_ALLOC|VMEM_FREE);
	free = vmem_size(kmem_arena, VMEM_FREE);

	return (free < (total / 10));
}
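
/*
 * Illustrative sketch only (hedged, not part of this file's build): a
 * reclaim hook that only releases cached KVA when the kmem_arena is close
 * to exhaustion, i.e. when less than one tenth of it is free.  The function
 * name is hypothetical.
 */
#if 0
static void
example_reclaim(void)
{
	if (uvm_km_va_starved_p()) {
		/* ... release cached KVA back to kmem_arena here ... */
	}
}
#endif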