uvm_km.c revision 1.136
/*	$NetBSD: uvm_km.c,v 1.136 2013/01/26 13:50:33 para Exp $	*/

/*
 * Copyright (c) 1997 Charles D. Cranor and Washington University.
 * Copyright (c) 1991, 1993, The Regents of the University of California.
 *
 * All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * The Mach Operating System project at Carnegie-Mellon University.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)vm_kern.c 8.3 (Berkeley) 1/12/94
 * from: Id: uvm_km.c,v 1.1.2.14 1998/02/06 05:19:27 chs Exp
 *
 *
 * Copyright (c) 1987, 1990 Carnegie-Mellon University.
 * All rights reserved.
 *
 * Permission to use, copy, modify and distribute this software and
 * its documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND
 * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie the
 * rights to redistribute these changes.
 */

/*
 * uvm_km.c: handle kernel memory allocation and management
 */

/*
 * overview of kernel memory management:
 *
 * the kernel virtual address space is mapped by "kernel_map."  kernel_map
 * starts at VM_MIN_KERNEL_ADDRESS and goes to VM_MAX_KERNEL_ADDRESS.
 * note that VM_MIN_KERNEL_ADDRESS is equal to vm_map_min(kernel_map).
 *
 * the kernel_map has several "submaps."  submaps can only appear in
 * the kernel_map (user processes can't use them).  submaps "take over"
 * the management of a sub-range of the kernel's address space.  submaps
 * are typically allocated at boot time and are never released.  kernel
 * virtual address space that is mapped by a submap is locked by the
 * submap's lock -- not the kernel_map's lock.
 *
 * thus, the useful feature of submaps is that they allow us to break
 * up the locking and protection of the kernel address space into smaller
 * chunks.
 *
 * the vm system has several standard kernel submaps/arenas, including:
 *   kmem_arena => used for kmem/pool (memoryallocators(9))
 *   pager_map => used to map "buf" structures into kernel space
 *   exec_map => used during exec to handle exec args
 *   etc...
 *
 * The kmem_arena is a "special submap", as it lives in a fixed map entry
 * within the kernel_map and is controlled by vmem(9).
 *
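 * as an illustrative sketch (not code from this file; pager_map and
 * PAGER_MAP_SIZE belong to the pager subsystem), a standard submap is
 * typically carved out of the kernel_map at boot time like this:
 *
 *      vaddr_t minaddr = 0, maxaddr;
 *
 *      pager_map = uvm_km_suballoc(kernel_map, &minaddr, &maxaddr,
 *          PAGER_MAP_SIZE, 0, false, NULL);
 *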
 * the kernel allocates its private memory out of special uvm_objects whose
 * reference count is set to UVM_OBJ_KERN (thus indicating that the objects
 * are "special" and never die).  all kernel objects should be thought of
 * as large, fixed-sized, sparsely populated uvm_objects.  each kernel
 * object is equal to the size of kernel virtual address space (i.e. the
 * value "VM_MAX_KERNEL_ADDRESS - VM_MIN_KERNEL_ADDRESS").
 *
 * note that just because a kernel object spans the entire kernel virtual
 * address space doesn't mean that it has to be mapped into the entire space.
 * large chunks of a kernel object's space go unused either because
 * that area of kernel VM is unmapped, or there is some other type of
 * object mapped into that range (e.g. a vnode).  for submaps' kernel
 * objects, the only part of the object that can ever be populated is the
 * offsets that are managed by the submap.
 *
 * note that the "offset" in a kernel object is always the kernel virtual
 * address minus the VM_MIN_KERNEL_ADDRESS (aka vm_map_min(kernel_map)).
 * example:
 *   suppose VM_MIN_KERNEL_ADDRESS is 0xf8000000 and the kernel does a
 *   uvm_km_alloc(kernel_map, PAGE_SIZE) [allocate 1 wired down page in the
 *   kernel map].  if uvm_km_alloc returns virtual address 0xf8235000,
 *   then that means that the page at offset 0x235000 in kernel_object is
 *   mapped at 0xf8235000.
 *
 * kernel objects have one other special property: when the kernel virtual
 * memory mapping them is unmapped, the backing memory in the object is
 * freed right away.  this is done with the uvm_km_pgremove() function.
 * this has to be done because there is no backing store for kernel pages
 * and no need to save them after they are no longer referenced.
 *
 * Generic arenas:
 *
 * kmem_arena:
 *	Main arena controlling the kernel KVA used by other arenas.
 *
 * kmem_va_arena:
 *	Implements quantum caching in order to speed up allocations and
 *	reduce fragmentation.  The pool(9), unless created with a custom
 *	meta-data allocator, and kmem(9) subsystems use this arena.
 *
 * Arenas for meta-data allocations are used by vmem(9) and pool(9).
 * These arenas cannot use quantum cache.  However, kmem_va_meta_arena
 * compensates for this by importing larger chunks from kmem_arena.
 *
 * kmem_va_meta_arena:
 *	Space for meta-data.
 *
 * kmem_meta_arena:
 *	Imports from kmem_va_meta_arena.  Allocations from this arena are
 *	backed with pages.
 *
 * Arena stacking:
 *
 *	kmem_arena
 *		kmem_va_arena
 *		kmem_va_meta_arena
 *			kmem_meta_arena
 */

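/*
 * A sketch of how the stacking is realized (mirroring what
 * uvm_km_bootstrap() below actually does, with the quantum-cache size
 * fixed at 8 pages for brevity): a child arena imports virtual space
 * from its parent by passing the parent's allocation functions to
 * vmem_create():
 *
 *      kmem_va_arena = vmem_create("kva", 0, 0, PAGE_SIZE,
 *          vmem_alloc, vmem_free, kmem_arena,
 *          8 * PAGE_SIZE, VM_NOSLEEP, IPL_VM);
 */
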
#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: uvm_km.c,v 1.136 2013/01/26 13:50:33 para Exp $");

#include "opt_uvmhist.h"

#include "opt_kmempages.h"

#ifndef NKMEMPAGES
#define NKMEMPAGES 0
#endif

/*
 * Defaults for lower and upper-bounds for the kmem_arena page count.
 * Can be overridden by kernel config options.
 */
#ifndef NKMEMPAGES_MIN
#define NKMEMPAGES_MIN NKMEMPAGES_MIN_DEFAULT
#endif

#ifndef NKMEMPAGES_MAX
#define NKMEMPAGES_MAX NKMEMPAGES_MAX_DEFAULT
#endif


#include <sys/param.h>
#include <sys/systm.h>
#include <sys/proc.h>
#include <sys/pool.h>
#include <sys/vmem.h>
#include <sys/vmem_impl.h>
#include <sys/kmem.h>

#include <uvm/uvm.h>

/*
 * global data structures
 */

struct vm_map *kernel_map = NULL;

/*
 * local data structures
 */

static struct vm_map		kernel_map_store;
static struct vm_map_entry	kernel_image_mapent_store;
static struct vm_map_entry	kernel_kmem_mapent_store;

int nkmempages = 0;
vaddr_t kmembase;
vsize_t kmemsize;

static struct vmem kmem_arena_store;
vmem_t *kmem_arena = NULL;
vmem_t *kmem_va_arena;

/*
 * kmeminit_nkmempages: calculate the size of kmem_arena.
 */
void
kmeminit_nkmempages(void)
{
	int npages;

	if (nkmempages != 0) {
		/*
		 * It's already been set (by us being here before);
		 * bail out now.
		 */
		return;
	}

#if defined(PMAP_MAP_POOLPAGE)
	npages = (physmem / 4);
#else
	npages = (physmem / 3) * 2;
#endif /* defined(PMAP_MAP_POOLPAGE) */

#ifndef NKMEMPAGES_MAX_UNLIMITED
	if (npages > NKMEMPAGES_MAX)
		npages = NKMEMPAGES_MAX;
#endif

	if (npages < NKMEMPAGES_MIN)
		npages = NKMEMPAGES_MIN;

	nkmempages = npages;
}

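/*
 * Worked example (assumed numbers, for illustration only): on a machine
 * with 1 GiB of RAM and 4 KiB pages, physmem is 262144.  With
 * PMAP_MAP_POOLPAGE defined, npages = 262144 / 4 = 65536, i.e. a
 * 256 MiB kmem_arena; without it, npages = (262144 / 3) * 2 = 174762.
 * Either result is then clamped against NKMEMPAGES_MIN and, unless
 * NKMEMPAGES_MAX_UNLIMITED is defined, NKMEMPAGES_MAX.
 */
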
/*
 * uvm_km_bootstrap: init kernel maps and objects to reflect reality (i.e.
 * KVM already allocated for text, data, bss, and static data structures).
 *
 * => KVM is defined by VM_MIN_KERNEL_ADDRESS/VM_MAX_KERNEL_ADDRESS.
 *    we assume that [vmin -> start] has already been allocated and that
 *    "end" is the end.
 */

void
uvm_km_bootstrap(vaddr_t start, vaddr_t end)
{
	bool kmem_arena_small;
	vaddr_t base = VM_MIN_KERNEL_ADDRESS;
	struct uvm_map_args args;
	int error;

	UVMHIST_FUNC(__func__); UVMHIST_CALLED(maphist);
	UVMHIST_LOG(maphist, "start=%#"PRIxVADDR" end=%#"PRIxVADDR,
	    start, end, 0,0);

	kmeminit_nkmempages();
	kmemsize = (vsize_t)nkmempages * PAGE_SIZE;
	kmem_arena_small = kmemsize < 64 * 1024 * 1024;

	UVMHIST_LOG(maphist, "kmemsize=%#"PRIxVSIZE, kmemsize, 0,0,0);

	/*
	 * next, init kernel memory objects.
	 */

	/* kernel_object: for pageable anonymous kernel memory */
	uvm_kernel_object = uao_create(VM_MAX_KERNEL_ADDRESS -
	    VM_MIN_KERNEL_ADDRESS, UAO_FLAG_KERNOBJ);

	/*
	 * init the map and reserve any kernel space that might already
	 * have been allocated before installing.
	 */

	uvm_map_setup(&kernel_map_store, base, end, VM_MAP_PAGEABLE);
	kernel_map_store.pmap = pmap_kernel();
	if (start != base) {
		error = uvm_map_prepare(&kernel_map_store,
		    base, start - base,
		    NULL, UVM_UNKNOWN_OFFSET, 0,
		    UVM_MAPFLAG(UVM_PROT_ALL, UVM_PROT_ALL, UVM_INH_NONE,
		    UVM_ADV_RANDOM, UVM_FLAG_FIXED), &args);
		if (!error) {
			kernel_image_mapent_store.flags =
			    UVM_MAP_KERNEL | UVM_MAP_STATIC | UVM_MAP_NOMERGE;
			error = uvm_map_enter(&kernel_map_store, &args,
			    &kernel_image_mapent_store);
		}

		if (error)
			panic(
			    "uvm_km_bootstrap: could not reserve space for kernel");

		kmembase = args.uma_start + args.uma_size;
	} else {
		kmembase = base;
	}

	error = uvm_map_prepare(&kernel_map_store,
	    kmembase, kmemsize,
	    NULL, UVM_UNKNOWN_OFFSET, 0,
	    UVM_MAPFLAG(UVM_PROT_ALL, UVM_PROT_ALL, UVM_INH_NONE,
	    UVM_ADV_RANDOM, UVM_FLAG_FIXED), &args);
	if (!error) {
		kernel_kmem_mapent_store.flags =
		    UVM_MAP_KERNEL | UVM_MAP_STATIC | UVM_MAP_NOMERGE;
		error = uvm_map_enter(&kernel_map_store, &args,
		    &kernel_kmem_mapent_store);
	}

	if (error)
		panic("uvm_km_bootstrap: could not reserve kernel kmem");

	/*
	 * install!
	 */

	kernel_map = &kernel_map_store;

	pool_subsystem_init();

	kmem_arena = vmem_init(&kmem_arena_store, "kmem",
	    kmembase, kmemsize, PAGE_SIZE, NULL, NULL, NULL,
	    0, VM_NOSLEEP | VM_BOOTSTRAP, IPL_VM);
#ifdef PMAP_GROWKERNEL
	/*
	 * kmem_arena VA allocations happen independently of uvm_map.
	 * grow kernel to accommodate the kmem_arena.
	 */
	if (uvm_maxkaddr < kmembase + kmemsize) {
		uvm_maxkaddr = pmap_growkernel(kmembase + kmemsize);
		KASSERTMSG(uvm_maxkaddr >= kmembase + kmemsize,
		    "%#"PRIxVADDR" %#"PRIxVADDR" %#"PRIxVSIZE,
		    uvm_maxkaddr, kmembase, kmemsize);
	}
#endif

	vmem_create_arenas(kmem_arena);

	UVMHIST_LOG(maphist, "kmem vmem created (base=%#"PRIxVADDR
	    ", size=%#"PRIxVSIZE")", kmembase, kmemsize, 0,0);

	kmem_va_arena = vmem_create("kva", 0, 0, PAGE_SIZE,
	    vmem_alloc, vmem_free, kmem_arena,
	    (kmem_arena_small ? 4 : 8) * PAGE_SIZE,
	    VM_NOSLEEP, IPL_VM);

	UVMHIST_LOG(maphist, "<- done", 0,0,0,0);
}

/*
 * uvm_km_init: init the kernel maps virtual memory caches
 * and start the pool/kmem allocator.
 */
void
uvm_km_init(void)
{

	kmem_init();

	kmeminit(); // killme
}

/*
 * uvm_km_suballoc: allocate a submap in the kernel map.  once a submap
 * is allocated all references to that area of VM must go through it.  this
 * allows the locking of VAs in kernel_map to be broken up into regions.
 *
 * => if `fixed' is true, *vmin specifies where the region described
 *    by the submap must start
 * => if submap is non NULL we use that as the submap, otherwise we
 *    alloc a new map
 */

struct vm_map *
uvm_km_suballoc(struct vm_map *map, vaddr_t *vmin /* IN/OUT */,
    vaddr_t *vmax /* OUT */, vsize_t size, int flags, bool fixed,
    struct vm_map *submap)
{
	int mapflags = UVM_FLAG_NOMERGE | (fixed ? UVM_FLAG_FIXED : 0);
	UVMHIST_FUNC(__func__); UVMHIST_CALLED(maphist);

	KASSERT(vm_map_pmap(map) == pmap_kernel());

	size = round_page(size);	/* round up to pagesize */

	/*
	 * first allocate a blank spot in the parent map
	 */

	if (uvm_map(map, vmin, size, NULL, UVM_UNKNOWN_OFFSET, 0,
	    UVM_MAPFLAG(UVM_PROT_ALL, UVM_PROT_ALL, UVM_INH_NONE,
	    UVM_ADV_RANDOM, mapflags)) != 0) {
		panic("%s: unable to allocate space in parent map", __func__);
	}

	/*
	 * set VM bounds (vmin is filled in by uvm_map)
	 */

	*vmax = *vmin + size;

	/*
	 * add references to pmap and create or init the submap
	 */

	pmap_reference(vm_map_pmap(map));
	if (submap == NULL) {
		submap = kmem_alloc(sizeof(*submap), KM_SLEEP);
		if (submap == NULL)
			panic("uvm_km_suballoc: unable to create submap");
	}
	uvm_map_setup(submap, *vmin, *vmax, flags);
	submap->pmap = vm_map_pmap(map);

	/*
	 * now let uvm_map_submap plug it in...
	 */

	if (uvm_map_submap(map, *vmin, *vmax, submap) != 0)
		panic("uvm_km_suballoc: submap allocation failed");

	return(submap);
}

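/*
 * Usage sketch (illustrative only; exec_map and NCARGS are defined
 * elsewhere in the kernel): a pageable submap for exec argument
 * handling could be created at boot time like this:
 *
 *      vaddr_t minaddr = 0, maxaddr;
 *
 *      exec_map = uvm_km_suballoc(kernel_map, &minaddr, &maxaddr,
 *          16 * NCARGS, VM_MAP_PAGEABLE, false, NULL);
 */
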
/*
 * uvm_km_pgremove: remove pages from a kernel uvm_object and KVA.
 */

void
uvm_km_pgremove(vaddr_t startva, vaddr_t endva)
{
	struct uvm_object * const uobj = uvm_kernel_object;
	const voff_t start = startva - vm_map_min(kernel_map);
	const voff_t end = endva - vm_map_min(kernel_map);
	struct vm_page *pg;
	voff_t curoff, nextoff;
	int swpgonlydelta = 0;
	UVMHIST_FUNC(__func__); UVMHIST_CALLED(maphist);

	KASSERT(VM_MIN_KERNEL_ADDRESS <= startva);
	KASSERT(startva < endva);
	KASSERT(endva <= VM_MAX_KERNEL_ADDRESS);

	mutex_enter(uobj->vmobjlock);
	pmap_remove(pmap_kernel(), startva, endva);
	for (curoff = start; curoff < end; curoff = nextoff) {
		nextoff = curoff + PAGE_SIZE;
		pg = uvm_pagelookup(uobj, curoff);
		if (pg != NULL && pg->flags & PG_BUSY) {
			pg->flags |= PG_WANTED;
			UVM_UNLOCK_AND_WAIT(pg, uobj->vmobjlock, 0,
			    "km_pgrm", 0);
			mutex_enter(uobj->vmobjlock);
			nextoff = curoff;
			continue;
		}

		/*
		 * free the swap slot, then the page.
		 */

		if (pg == NULL &&
		    uao_find_swslot(uobj, curoff >> PAGE_SHIFT) > 0) {
			swpgonlydelta++;
		}
		uao_dropswap(uobj, curoff >> PAGE_SHIFT);
		if (pg != NULL) {
			mutex_enter(&uvm_pageqlock);
			uvm_pagefree(pg);
			mutex_exit(&uvm_pageqlock);
		}
	}
	mutex_exit(uobj->vmobjlock);

	if (swpgonlydelta > 0) {
		mutex_enter(&uvm_swap_data_lock);
		KASSERT(uvmexp.swpgonly >= swpgonlydelta);
		uvmexp.swpgonly -= swpgonlydelta;
		mutex_exit(&uvm_swap_data_lock);
	}
}


/*
 * uvm_km_pgremove_intrsafe: like uvm_km_pgremove(), but for
 * non-object-backed regions.
 *
 * => when you unmap a part of anonymous kernel memory you want to toss
 *    the pages right away.  (this is called from uvm_unmap_...).
 * => none of the pages will ever be busy, and none of them will ever
 *    be on the active or inactive queues (because they have no object).
 */

void
uvm_km_pgremove_intrsafe(struct vm_map *map, vaddr_t start, vaddr_t end)
{
#define __PGRM_BATCH 16
	struct vm_page *pg;
	paddr_t pa[__PGRM_BATCH];
	int npgrm, i;
	vaddr_t va, batch_vastart;

	UVMHIST_FUNC(__func__); UVMHIST_CALLED(maphist);

	KASSERT(VM_MAP_IS_KERNEL(map));
	KASSERTMSG(vm_map_min(map) <= start,
	    "vm_map_min(map) [%#"PRIxVADDR"] <= start [%#"PRIxVADDR"]"
	    " (size=%#"PRIxVSIZE")",
	    vm_map_min(map), start, end - start);
	KASSERT(start < end);
	KASSERT(end <= vm_map_max(map));

	for (va = start; va < end;) {
		batch_vastart = va;
		/* create a batch of at most __PGRM_BATCH pages to free */
		for (i = 0;
		    i < __PGRM_BATCH && va < end;
		    va += PAGE_SIZE) {
			if (!pmap_extract(pmap_kernel(), va, &pa[i])) {
				continue;
			}
			i++;
		}
		npgrm = i;
		/* now remove the mappings */
		pmap_kremove(batch_vastart, va - batch_vastart);
		/* and free the pages */
		for (i = 0; i < npgrm; i++) {
			pg = PHYS_TO_VM_PAGE(pa[i]);
			KASSERT(pg);
			KASSERT(pg->uobject == NULL && pg->uanon == NULL);
			KASSERT((pg->flags & PG_BUSY) == 0);
			uvm_pagefree(pg);
		}
	}
#undef __PGRM_BATCH
}

#if defined(DEBUG)
void
uvm_km_check_empty(struct vm_map *map, vaddr_t start, vaddr_t end)
{
	struct vm_page *pg;
	vaddr_t va;
	paddr_t pa;
	UVMHIST_FUNC(__func__); UVMHIST_CALLED(maphist);

	KDASSERT(VM_MAP_IS_KERNEL(map));
	KDASSERT(vm_map_min(map) <= start);
	KDASSERT(start < end);
	KDASSERT(end <= vm_map_max(map));

	for (va = start; va < end; va += PAGE_SIZE) {
		if (pmap_extract(pmap_kernel(), va, &pa)) {
			panic("uvm_km_check_empty: va %p has pa 0x%llx",
			    (void *)va, (long long)pa);
		}
		mutex_enter(uvm_kernel_object->vmobjlock);
		pg = uvm_pagelookup(uvm_kernel_object,
		    va - vm_map_min(kernel_map));
		mutex_exit(uvm_kernel_object->vmobjlock);
		if (pg) {
			panic("uvm_km_check_empty: "
			    "has page hashed at %p", (const void *)va);
		}
	}
}
#endif /* defined(DEBUG) */

/*
 * uvm_km_alloc: allocate an area of kernel memory.
 *
 * => NOTE: we can return 0 even if we can wait if there is not enough
 *    free VM space in the map... caller should be prepared to handle
 *    this case.
 * => we return KVA of memory allocated
 */

vaddr_t
uvm_km_alloc(struct vm_map *map, vsize_t size, vsize_t align, uvm_flag_t flags)
{
	vaddr_t kva, loopva;
	vaddr_t offset;
	vsize_t loopsize;
	struct vm_page *pg;
	struct uvm_object *obj;
	int pgaflags;
	vm_prot_t prot;
	UVMHIST_FUNC(__func__); UVMHIST_CALLED(maphist);

	KASSERT(vm_map_pmap(map) == pmap_kernel());
	KASSERT((flags & UVM_KMF_TYPEMASK) == UVM_KMF_WIRED ||
	    (flags & UVM_KMF_TYPEMASK) == UVM_KMF_PAGEABLE ||
	    (flags & UVM_KMF_TYPEMASK) == UVM_KMF_VAONLY);
	KASSERT((flags & UVM_KMF_COLORMATCH) == 0 ||
	    (flags & UVM_KMF_VAONLY) != 0);

	/*
	 * setup for call
	 */

	kva = vm_map_min(map);	/* hint */
	size = round_page(size);
	obj = (flags & UVM_KMF_PAGEABLE) ? uvm_kernel_object : NULL;
	UVMHIST_LOG(maphist,"  (map=0x%x, obj=0x%x, size=0x%x, flags=%d)",
	    map, obj, size, flags);

	/*
	 * allocate some virtual space
	 */

	if (__predict_false(uvm_map(map, &kva, size, obj, UVM_UNKNOWN_OFFSET,
	    align, UVM_MAPFLAG(UVM_PROT_ALL, UVM_PROT_ALL, UVM_INH_NONE,
	    UVM_ADV_RANDOM,
	    (flags & (UVM_KMF_TRYLOCK | UVM_KMF_NOWAIT | UVM_KMF_WAITVA
	    | UVM_KMF_COLORMATCH)))) != 0)) {
		UVMHIST_LOG(maphist, "<- done (no VM)",0,0,0,0);
		return(0);
	}

	/*
	 * if all we wanted was VA, return now
	 */

	if (flags & (UVM_KMF_VAONLY | UVM_KMF_PAGEABLE)) {
		UVMHIST_LOG(maphist,"<- done valloc (kva=0x%x)", kva,0,0,0);
		return(kva);
	}

	/*
	 * recover object offset from virtual address
	 */

	offset = kva - vm_map_min(kernel_map);
	UVMHIST_LOG(maphist, "  kva=0x%x, offset=0x%x", kva, offset,0,0);

	/*
	 * now allocate and map in the memory... note that we are the only ones
	 * who should ever get a handle on this area of VM.
	 */

	loopva = kva;
	loopsize = size;

	pgaflags = UVM_FLAG_COLORMATCH;
	if (flags & UVM_KMF_NOWAIT)
		pgaflags |= UVM_PGA_USERESERVE;
	if (flags & UVM_KMF_ZERO)
		pgaflags |= UVM_PGA_ZERO;
	prot = VM_PROT_READ | VM_PROT_WRITE;
	if (flags & UVM_KMF_EXEC)
		prot |= VM_PROT_EXECUTE;
	while (loopsize) {
		KASSERTMSG(!pmap_extract(pmap_kernel(), loopva, NULL),
		    "loopva=%#"PRIxVADDR, loopva);

		pg = uvm_pagealloc_strat(NULL, offset, NULL, pgaflags,
#ifdef UVM_KM_VMFREELIST
		    UVM_PGA_STRAT_ONLY, UVM_KM_VMFREELIST
#else
		    UVM_PGA_STRAT_NORMAL, 0
#endif
		    );

		/*
		 * out of memory?
		 */

		if (__predict_false(pg == NULL)) {
			if ((flags & UVM_KMF_NOWAIT) ||
			    ((flags & UVM_KMF_CANFAIL) && !uvm_reclaimable())) {
				/* free everything! */
				uvm_km_free(map, kva, size,
				    flags & UVM_KMF_TYPEMASK);
				return (0);
			} else {
				uvm_wait("km_getwait2");	/* sleep here */
				continue;
			}
		}

		pg->flags &= ~PG_BUSY;	/* new page */
		UVM_PAGE_OWN(pg, NULL);

		/*
		 * map it in
		 */

		pmap_kenter_pa(loopva, VM_PAGE_TO_PHYS(pg),
		    prot, PMAP_KMPAGE);
		loopva += PAGE_SIZE;
		offset += PAGE_SIZE;
		loopsize -= PAGE_SIZE;
	}

	pmap_update(pmap_kernel());

	UVMHIST_LOG(maphist,"<- done (kva=0x%x)", kva,0,0,0);
	return(kva);
}

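/*
 * Usage sketch (illustrative only): allocating and releasing one wired,
 * zero-filled page of kernel memory, tolerating failure:
 *
 *      vaddr_t va;
 *
 *      va = uvm_km_alloc(kernel_map, PAGE_SIZE, 0,
 *          UVM_KMF_WIRED | UVM_KMF_ZERO | UVM_KMF_CANFAIL);
 *      if (va == 0)
 *              return ENOMEM;
 *      ...
 *      uvm_km_free(kernel_map, va, PAGE_SIZE, UVM_KMF_WIRED);
 */
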
/*
 * uvm_km_free: free an area of kernel memory
 */

void
uvm_km_free(struct vm_map *map, vaddr_t addr, vsize_t size, uvm_flag_t flags)
{
	UVMHIST_FUNC(__func__); UVMHIST_CALLED(maphist);

	KASSERT((flags & UVM_KMF_TYPEMASK) == UVM_KMF_WIRED ||
	    (flags & UVM_KMF_TYPEMASK) == UVM_KMF_PAGEABLE ||
	    (flags & UVM_KMF_TYPEMASK) == UVM_KMF_VAONLY);
	KASSERT((addr & PAGE_MASK) == 0);
	KASSERT(vm_map_pmap(map) == pmap_kernel());

	size = round_page(size);

	if (flags & UVM_KMF_PAGEABLE) {
		uvm_km_pgremove(addr, addr + size);
	} else if (flags & UVM_KMF_WIRED) {
		/*
		 * Note: uvm_km_pgremove_intrsafe() extracts each mapping
		 * to find the underlying page, so the mappings must still
		 * be present here; the VA range is removed afterwards.
		 * See comment below about KVA visibility.
		 */
		uvm_km_pgremove_intrsafe(map, addr, addr + size);
	}

	/*
	 * Note: uvm_unmap_remove() calls pmap_update() for us, before
	 * KVA becomes globally available.
	 */

	uvm_unmap1(map, addr, addr + size, UVM_FLAG_VAONLY);
}

/* Sanity; must specify both or none. */
#if (defined(PMAP_MAP_POOLPAGE) || defined(PMAP_UNMAP_POOLPAGE)) && \
    (!defined(PMAP_MAP_POOLPAGE) || !defined(PMAP_UNMAP_POOLPAGE))
#error Must specify MAP and UNMAP together.
#endif

int
uvm_km_kmem_alloc(vmem_t *vm, vmem_size_t size, vm_flag_t flags,
    vmem_addr_t *addr)
{
	struct vm_page *pg;
	vmem_addr_t va;
	int rc;
	vaddr_t loopva;
	vsize_t loopsize;

	size = round_page(size);

#if defined(PMAP_MAP_POOLPAGE)
	if (size == PAGE_SIZE) {
again:
#ifdef PMAP_ALLOC_POOLPAGE
		pg = PMAP_ALLOC_POOLPAGE((flags & VM_SLEEP) ?
		    0 : UVM_PGA_USERESERVE);
#else
		pg = uvm_pagealloc(NULL, 0, NULL,
		    (flags & VM_SLEEP) ? 0 : UVM_PGA_USERESERVE);
#endif /* PMAP_ALLOC_POOLPAGE */
		if (__predict_false(pg == NULL)) {
			if (flags & VM_SLEEP) {
				uvm_wait("plpg");
				goto again;
			}
			return ENOMEM;
		}
		va = PMAP_MAP_POOLPAGE(VM_PAGE_TO_PHYS(pg));
		if (__predict_false(va == 0)) {
			uvm_pagefree(pg);
			return ENOMEM;
		}
		*addr = va;
		return 0;
	}
#endif /* PMAP_MAP_POOLPAGE */

	rc = vmem_alloc(vm, size, flags, &va);
	if (rc != 0)
		return rc;

#ifdef PMAP_GROWKERNEL
	/*
	 * These VA allocations happen independently of uvm_map
	 * so this allocation must not extend beyond the current limit.
	 */
	KASSERTMSG(uvm_maxkaddr >= va + size,
	    "%#"PRIxVADDR" %#"PRIxPTR" %#zx",
	    uvm_maxkaddr, va, size);
#endif

	loopva = va;
	loopsize = size;

	while (loopsize) {
#ifdef DIAGNOSTIC
		paddr_t pa;
#endif
		KASSERTMSG(!pmap_extract(pmap_kernel(), loopva, &pa),
		    "loopva=%#"PRIxVADDR" loopsize=%#"PRIxVSIZE
		    " pa=%#"PRIxPADDR" vmem=%p",
		    loopva, loopsize, pa, vm);

		pg = uvm_pagealloc(NULL, loopva, NULL,
		    UVM_FLAG_COLORMATCH
		    | ((flags & VM_SLEEP) ? 0 : UVM_PGA_USERESERVE));
		if (__predict_false(pg == NULL)) {
			if (flags & VM_SLEEP) {
				uvm_wait("plpg");
				continue;
			} else {
				uvm_km_pgremove_intrsafe(kernel_map, va,
				    va + size);
				vmem_free(vm, va, size);
				return ENOMEM;
			}
		}

		pg->flags &= ~PG_BUSY;	/* new page */
		UVM_PAGE_OWN(pg, NULL);
		pmap_kenter_pa(loopva, VM_PAGE_TO_PHYS(pg),
		    VM_PROT_READ|VM_PROT_WRITE, PMAP_KMPAGE);

		loopva += PAGE_SIZE;
		loopsize -= PAGE_SIZE;
	}
	pmap_update(pmap_kernel());

	*addr = va;

	return 0;
}

void
uvm_km_kmem_free(vmem_t *vm, vmem_addr_t addr, size_t size)
{

	size = round_page(size);
#if defined(PMAP_UNMAP_POOLPAGE)
	if (size == PAGE_SIZE) {
		paddr_t pa;

		pa = PMAP_UNMAP_POOLPAGE(addr);
		uvm_pagefree(PHYS_TO_VM_PAGE(pa));
		return;
	}
#endif /* PMAP_UNMAP_POOLPAGE */
	uvm_km_pgremove_intrsafe(kernel_map, addr, addr + size);
	pmap_update(pmap_kernel());

	vmem_free(vm, addr, size);
}

bool
uvm_km_va_starved_p(void)
{
	vmem_size_t total;
	vmem_size_t free;

	if (kmem_arena == NULL)
		return false;

	total = vmem_size(kmem_arena, VMEM_ALLOC|VMEM_FREE);
	free = vmem_size(kmem_arena, VMEM_FREE);

	return (free < (total / 10));
}
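
/*
 * Usage sketch (illustrative only): the predicate above lets callers
 * such as the page daemon detect kernel VA shortage and respond by
 * reclaiming kernel memory rather than paging out more:
 *
 *      if (uvm_km_va_starved_p()) {
 *              ... reclaim kernel memory, e.g. drain pools ...
 *      }
 */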