uvm_km.c revision 1.158
1/* $NetBSD: uvm_km.c,v 1.158 2020/07/08 13:26:22 skrll Exp $ */ 2 3/* 4 * Copyright (c) 1997 Charles D. Cranor and Washington University. 5 * Copyright (c) 1991, 1993, The Regents of the University of California. 6 * 7 * All rights reserved. 8 * 9 * This code is derived from software contributed to Berkeley by 10 * The Mach Operating System project at Carnegie-Mellon University. 11 * 12 * Redistribution and use in source and binary forms, with or without 13 * modification, are permitted provided that the following conditions 14 * are met: 15 * 1. Redistributions of source code must retain the above copyright 16 * notice, this list of conditions and the following disclaimer. 17 * 2. Redistributions in binary form must reproduce the above copyright 18 * notice, this list of conditions and the following disclaimer in the 19 * documentation and/or other materials provided with the distribution. 20 * 3. Neither the name of the University nor the names of its contributors 21 * may be used to endorse or promote products derived from this software 22 * without specific prior written permission. 23 * 24 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND 25 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 26 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 27 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE 28 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 29 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS 30 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 31 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT 32 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY 33 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 34 * SUCH DAMAGE. 35 * 36 * @(#)vm_kern.c 8.3 (Berkeley) 1/12/94 37 * from: Id: uvm_km.c,v 1.1.2.14 1998/02/06 05:19:27 chs Exp 38 * 39 * 40 * Copyright (c) 1987, 1990 Carnegie-Mellon University. 41 * All rights reserved. 42 * 43 * Permission to use, copy, modify and distribute this software and 44 * its documentation is hereby granted, provided that both the copyright 45 * notice and this permission notice appear in all copies of the 46 * software, derivative works or modified versions, and any portions 47 * thereof, and that both notices appear in supporting documentation. 48 * 49 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS" 50 * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND 51 * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE. 52 * 53 * Carnegie Mellon requests users of this software to return to 54 * 55 * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU 56 * School of Computer Science 57 * Carnegie Mellon University 58 * Pittsburgh PA 15213-3890 59 * 60 * any improvements or extensions that they make and grant Carnegie the 61 * rights to redistribute these changes. 62 */ 63 64/* 65 * uvm_km.c: handle kernel memory allocation and management 66 */ 67 68/* 69 * overview of kernel memory management: 70 * 71 * the kernel virtual address space is mapped by "kernel_map." kernel_map 72 * starts at VM_MIN_KERNEL_ADDRESS and goes to VM_MAX_KERNEL_ADDRESS. 73 * note that VM_MIN_KERNEL_ADDRESS is equal to vm_map_min(kernel_map). 74 * 75 * the kernel_map has several "submaps." submaps can only appear in 76 * the kernel_map (user processes can't use them). 
submaps "take over" 77 * the management of a sub-range of the kernel's address space. submaps 78 * are typically allocated at boot time and are never released. kernel 79 * virtual address space that is mapped by a submap is locked by the 80 * submap's lock -- not the kernel_map's lock. 81 * 82 * thus, the useful feature of submaps is that they allow us to break 83 * up the locking and protection of the kernel address space into smaller 84 * chunks. 85 * 86 * the vm system has several standard kernel submaps/arenas, including: 87 * kmem_arena => used for kmem/pool (memoryallocators(9)) 88 * pager_map => used to map "buf" structures into kernel space 89 * exec_map => used during exec to handle exec args 90 * etc... 91 * 92 * The kmem_arena is a "special submap", as it lives in a fixed map entry 93 * within the kernel_map and is controlled by vmem(9). 94 * 95 * the kernel allocates its private memory out of special uvm_objects whose 96 * reference count is set to UVM_OBJ_KERN (thus indicating that the objects 97 * are "special" and never die). all kernel objects should be thought of 98 * as large, fixed-sized, sparsely populated uvm_objects. each kernel 99 * object is equal to the size of kernel virtual address space (i.e. the 100 * value "VM_MAX_KERNEL_ADDRESS - VM_MIN_KERNEL_ADDRESS"). 101 * 102 * note that just because a kernel object spans the entire kernel virtual 103 * address space doesn't mean that it has to be mapped into the entire space. 104 * large chunks of a kernel object's space go unused either because 105 * that area of kernel VM is unmapped, or there is some other type of 106 * object mapped into that range (e.g. a vnode). for submap's kernel 107 * objects, the only part of the object that can ever be populated is the 108 * offsets that are managed by the submap. 109 * 110 * note that the "offset" in a kernel object is always the kernel virtual 111 * address minus the VM_MIN_KERNEL_ADDRESS (aka vm_map_min(kernel_map)). 112 * example: 113 * suppose VM_MIN_KERNEL_ADDRESS is 0xf8000000 and the kernel does a 114 * uvm_km_alloc(kernel_map, PAGE_SIZE) [allocate 1 wired down page in the 115 * kernel map]. if uvm_km_alloc returns virtual address 0xf8235000, 116 * then that means that the page at offset 0x235000 in kernel_object is 117 * mapped at 0xf8235000. 118 * 119 * kernel object have one other special property: when the kernel virtual 120 * memory mapping them is unmapped, the backing memory in the object is 121 * freed right away. this is done with the uvm_km_pgremove() function. 122 * this has to be done because there is no backing store for kernel pages 123 * and no need to save them after they are no longer referenced. 124 * 125 * Generic arenas: 126 * 127 * kmem_arena: 128 * Main arena controlling the kernel KVA used by other arenas. 129 * 130 * kmem_va_arena: 131 * Implements quantum caching in order to speedup allocations and 132 * reduce fragmentation. The pool(9), unless created with a custom 133 * meta-data allocator, and kmem(9) subsystems use this arena. 134 * 135 * Arenas for meta-data allocations are used by vmem(9) and pool(9). 136 * These arenas cannot use quantum cache. However, kmem_va_meta_arena 137 * compensates this by importing larger chunks from kmem_arena. 138 * 139 * kmem_va_meta_arena: 140 * Space for meta-data. 141 * 142 * kmem_meta_arena: 143 * Imports from kmem_va_meta_arena. Allocations from this arena are 144 * backed with the pages. 
145 * 146 * Arena stacking: 147 * 148 * kmem_arena 149 * kmem_va_arena 150 * kmem_va_meta_arena 151 * kmem_meta_arena 152 */ 153 154#include <sys/cdefs.h> 155__KERNEL_RCSID(0, "$NetBSD: uvm_km.c,v 1.158 2020/07/08 13:26:22 skrll Exp $"); 156 157#include "opt_uvmhist.h" 158 159#include "opt_kmempages.h" 160 161#ifndef NKMEMPAGES 162#define NKMEMPAGES 0 163#endif 164 165/* 166 * Defaults for lower and upper bounds for the kmem_arena page count. 167 * Can be overridden by kernel config options. 168 */ 169#ifndef NKMEMPAGES_MIN 170#define NKMEMPAGES_MIN NKMEMPAGES_MIN_DEFAULT 171#endif 172 173#ifndef NKMEMPAGES_MAX 174#define NKMEMPAGES_MAX NKMEMPAGES_MAX_DEFAULT 175#endif 176 177 178#include <sys/param.h> 179#include <sys/systm.h> 180#include <sys/atomic.h> 181#include <sys/proc.h> 182#include <sys/pool.h> 183#include <sys/vmem.h> 184#include <sys/vmem_impl.h> 185#include <sys/kmem.h> 186#include <sys/msan.h> 187 188#include <uvm/uvm.h> 189 190/* 191 * global data structures 192 */ 193 194struct vm_map *kernel_map = NULL; 195 196/* 197 * local data structures 198 */ 199 200static struct vm_map kernel_map_store; 201static struct vm_map_entry kernel_image_mapent_store; 202static struct vm_map_entry kernel_kmem_mapent_store; 203 204int nkmempages = 0; 205vaddr_t kmembase; 206vsize_t kmemsize; 207 208static struct vmem kmem_arena_store; 209vmem_t *kmem_arena = NULL; 210static struct vmem kmem_va_arena_store; 211vmem_t *kmem_va_arena; 212 213/* 214 * kmeminit_nkmempages: calculate the size of kmem_arena. 215 */ 216void 217kmeminit_nkmempages(void) 218{ 219 int npages; 220 221 if (nkmempages != 0) { 222 /* 223 * It's already been set (by us being here before); 224 * bail out now. 225 */ 226 return; 227 } 228 229#if defined(KMSAN) 230 npages = (physmem / 8); 231#elif defined(PMAP_MAP_POOLPAGE) 232 npages = (physmem / 4); 233#else 234 npages = (physmem / 3) * 2; 235#endif /* defined(PMAP_MAP_POOLPAGE) */ 236 237#ifndef NKMEMPAGES_MAX_UNLIMITED 238 if (npages > NKMEMPAGES_MAX) 239 npages = NKMEMPAGES_MAX; 240#endif 241 242 if (npages < NKMEMPAGES_MIN) 243 npages = NKMEMPAGES_MIN; 244 245 nkmempages = npages; 246} 247 248/* 249 * uvm_km_bootstrap: init kernel maps and objects to reflect reality (i.e. 250 * KVM already allocated for text, data, bss, and static data structures). 251 * 252 * => KVM is defined by VM_MIN_KERNEL_ADDRESS/VM_MAX_KERNEL_ADDRESS. 253 * we assume that [vmin -> start] has already been allocated and that 254 * "end" is the end. 255 */ 256 257void 258uvm_km_bootstrap(vaddr_t start, vaddr_t end) 259{ 260 bool kmem_arena_small; 261 vaddr_t base = VM_MIN_KERNEL_ADDRESS; 262 struct uvm_map_args args; 263 int error; 264 265 UVMHIST_FUNC(__func__); UVMHIST_CALLED(maphist); 266 UVMHIST_LOG(maphist, "start=%#jx end=%#jx", start, end, 0,0); 267 268 kmeminit_nkmempages(); 269 kmemsize = (vsize_t)nkmempages * PAGE_SIZE; 270 kmem_arena_small = kmemsize < 64 * 1024 * 1024; 271 272 UVMHIST_LOG(maphist, "kmemsize=%#jx", kmemsize, 0,0,0); 273 274 /* 275 * next, init kernel memory objects. 276 */ 277 278 /* kernel_object: for pageable anonymous kernel memory */ 279 uvm_kernel_object = uao_create(VM_MAX_KERNEL_ADDRESS - 280 VM_MIN_KERNEL_ADDRESS, UAO_FLAG_KERNOBJ); 281 282 /* 283 * init the map and reserve any space that might already 284 * have been allocated kernel space before installing.
285 */ 286 287 uvm_map_setup(&kernel_map_store, base, end, VM_MAP_PAGEABLE); 288 kernel_map_store.pmap = pmap_kernel(); 289 if (start != base) { 290 error = uvm_map_prepare(&kernel_map_store, 291 base, start - base, 292 NULL, UVM_UNKNOWN_OFFSET, 0, 293 UVM_MAPFLAG(UVM_PROT_ALL, UVM_PROT_ALL, UVM_INH_NONE, 294 UVM_ADV_RANDOM, UVM_FLAG_FIXED), &args); 295 if (!error) { 296 kernel_image_mapent_store.flags = 297 UVM_MAP_KERNEL | UVM_MAP_STATIC | UVM_MAP_NOMERGE; 298 error = uvm_map_enter(&kernel_map_store, &args, 299 &kernel_image_mapent_store); 300 } 301 302 if (error) 303 panic( 304 "uvm_km_bootstrap: could not reserve space for kernel"); 305 306 kmembase = args.uma_start + args.uma_size; 307 } else { 308 kmembase = base; 309 } 310 311 error = uvm_map_prepare(&kernel_map_store, 312 kmembase, kmemsize, 313 NULL, UVM_UNKNOWN_OFFSET, 0, 314 UVM_MAPFLAG(UVM_PROT_ALL, UVM_PROT_ALL, UVM_INH_NONE, 315 UVM_ADV_RANDOM, UVM_FLAG_FIXED), &args); 316 if (!error) { 317 kernel_kmem_mapent_store.flags = 318 UVM_MAP_KERNEL | UVM_MAP_STATIC | UVM_MAP_NOMERGE; 319 error = uvm_map_enter(&kernel_map_store, &args, 320 &kernel_kmem_mapent_store); 321 } 322 323 if (error) 324 panic("uvm_km_bootstrap: could not reserve kernel kmem"); 325 326 /* 327 * install! 328 */ 329 330 kernel_map = &kernel_map_store; 331 332 pool_subsystem_init(); 333 334 kmem_arena = vmem_init(&kmem_arena_store, "kmem", 335 kmembase, kmemsize, PAGE_SIZE, NULL, NULL, NULL, 336 0, VM_NOSLEEP | VM_BOOTSTRAP, IPL_VM); 337#ifdef PMAP_GROWKERNEL 338 /* 339 * kmem_arena VA allocations happen independently of uvm_map. 340 * grow kernel to accommodate the kmem_arena. 341 */ 342 if (uvm_maxkaddr < kmembase + kmemsize) { 343 uvm_maxkaddr = pmap_growkernel(kmembase + kmemsize); 344 KASSERTMSG(uvm_maxkaddr >= kmembase + kmemsize, 345 "%#"PRIxVADDR" %#"PRIxVADDR" %#"PRIxVSIZE, 346 uvm_maxkaddr, kmembase, kmemsize); 347 } 348#endif 349 350 vmem_subsystem_init(kmem_arena); 351 352 UVMHIST_LOG(maphist, "kmem vmem created (base=%#jx, size=%#jx)", 353 kmembase, kmemsize, 0,0); 354 355 kmem_va_arena = vmem_init(&kmem_va_arena_store, "kva", 356 0, 0, PAGE_SIZE, vmem_alloc, vmem_free, kmem_arena, 357 (kmem_arena_small ? 4 : VMEM_QCACHE_IDX_MAX) * PAGE_SIZE, 358 VM_NOSLEEP, IPL_VM); 359 360 UVMHIST_LOG(maphist, "<- done", 0,0,0,0); 361} 362 363/* 364 * uvm_km_init: init the kernel map's virtual memory caches 365 * and start the pool/kmem allocator. 366 */ 367void 368uvm_km_init(void) 369{ 370 kmem_init(); 371} 372 373/* 374 * uvm_km_suballoc: allocate a submap in the kernel map. once a submap 375 * is allocated all references to that area of VM must go through it. this 376 * allows the locking of VAs in kernel_map to be broken up into regions. 377 * 378 * => if `fixed' is true, *vmin specifies where the region described 380 * by the submap must start 381 * => if submap is non NULL we use that as the submap, otherwise we 382 * alloc a new map 383 */ 384 385struct vm_map * 386uvm_km_suballoc(struct vm_map *map, vaddr_t *vmin /* IN/OUT */, 387 vaddr_t *vmax /* OUT */, vsize_t size, int flags, bool fixed, 388 struct vm_map *submap) 389{ 390 int mapflags = UVM_FLAG_NOMERGE | (fixed ?
UVM_FLAG_FIXED : 0); 391 UVMHIST_FUNC(__func__); UVMHIST_CALLED(maphist); 392 393 KASSERT(vm_map_pmap(map) == pmap_kernel()); 394 395 size = round_page(size); /* round up to pagesize */ 396 397 /* 398 * first allocate a blank spot in the parent map 399 */ 400 401 if (uvm_map(map, vmin, size, NULL, UVM_UNKNOWN_OFFSET, 0, 402 UVM_MAPFLAG(UVM_PROT_ALL, UVM_PROT_ALL, UVM_INH_NONE, 403 UVM_ADV_RANDOM, mapflags)) != 0) { 404 panic("%s: unable to allocate space in parent map", __func__); 405 } 406 407 /* 408 * set VM bounds (vmin is filled in by uvm_map) 409 */ 410 411 *vmax = *vmin + size; 412 413 /* 414 * add references to pmap and create or init the submap 415 */ 416 417 pmap_reference(vm_map_pmap(map)); 418 if (submap == NULL) { 419 submap = kmem_alloc(sizeof(*submap), KM_SLEEP); 420 } 421 uvm_map_setup(submap, *vmin, *vmax, flags); 422 submap->pmap = vm_map_pmap(map); 423 424 /* 425 * now let uvm_map_submap plug it in... 426 */ 427 428 if (uvm_map_submap(map, *vmin, *vmax, submap) != 0) 429 panic("uvm_km_suballoc: submap allocation failed"); 430 431 return(submap); 432} 433 434/* 435 * uvm_km_pgremove: remove pages from a kernel uvm_object and KVA. 436 */ 437 438void 439uvm_km_pgremove(vaddr_t startva, vaddr_t endva) 440{ 441 struct uvm_object * const uobj = uvm_kernel_object; 442 const voff_t start = startva - vm_map_min(kernel_map); 443 const voff_t end = endva - vm_map_min(kernel_map); 444 struct vm_page *pg; 445 voff_t curoff, nextoff; 446 int swpgonlydelta = 0; 447 UVMHIST_FUNC(__func__); UVMHIST_CALLED(maphist); 448 449 KASSERT(VM_MIN_KERNEL_ADDRESS <= startva); 450 KASSERT(startva < endva); 451 KASSERT(endva <= VM_MAX_KERNEL_ADDRESS); 452 453 rw_enter(uobj->vmobjlock, RW_WRITER); 454 pmap_remove(pmap_kernel(), startva, endva); 455 for (curoff = start; curoff < end; curoff = nextoff) { 456 nextoff = curoff + PAGE_SIZE; 457 pg = uvm_pagelookup(uobj, curoff); 458 if (pg != NULL && pg->flags & PG_BUSY) { 459 uvm_pagewait(pg, uobj->vmobjlock, "km_pgrm"); 460 rw_enter(uobj->vmobjlock, RW_WRITER); 461 nextoff = curoff; 462 continue; 463 } 464 465 /* 466 * free the swap slot, then the page. 467 */ 468 469 if (pg == NULL && 470 uao_find_swslot(uobj, curoff >> PAGE_SHIFT) > 0) { 471 swpgonlydelta++; 472 } 473 uao_dropswap(uobj, curoff >> PAGE_SHIFT); 474 if (pg != NULL) { 475 uvm_pagefree(pg); 476 } 477 } 478 rw_exit(uobj->vmobjlock); 479 480 if (swpgonlydelta > 0) { 481 KASSERT(uvmexp.swpgonly >= swpgonlydelta); 482 atomic_add_int(&uvmexp.swpgonly, -swpgonlydelta); 483 } 484} 485 486 487/* 488 * uvm_km_pgremove_intrsafe: like uvm_km_pgremove(), but for non-object-backed 489 * regions. 490 * 491 * => when you unmap a part of anonymous kernel memory you want to toss 492 * the pages right away. (this is called from uvm_unmap_...). 493 * => none of the pages will ever be busy, and none of them will ever 494 * be on the active or inactive queues (because they have no object).
495 */ 496 497void 498uvm_km_pgremove_intrsafe(struct vm_map *map, vaddr_t start, vaddr_t end) 499{ 500#define __PGRM_BATCH 16 501 struct vm_page *pg; 502 paddr_t pa[__PGRM_BATCH]; 503 int npgrm, i; 504 vaddr_t va, batch_vastart; 505 506 UVMHIST_FUNC(__func__); UVMHIST_CALLED(maphist); 507 508 KASSERT(VM_MAP_IS_KERNEL(map)); 509 KASSERTMSG(vm_map_min(map) <= start, 510 "vm_map_min(map) [%#"PRIxVADDR"] <= start [%#"PRIxVADDR"]" 511 " (size=%#"PRIxVSIZE")", 512 vm_map_min(map), start, end - start); 513 KASSERT(start < end); 514 KASSERT(end <= vm_map_max(map)); 515 516 for (va = start; va < end;) { 517 batch_vastart = va; 518 /* create a batch of at most __PGRM_BATCH pages to free */ 519 for (i = 0; 520 i < __PGRM_BATCH && va < end; 521 va += PAGE_SIZE) { 522 if (!pmap_extract(pmap_kernel(), va, &pa[i])) { 523 continue; 524 } 525 i++; 526 } 527 npgrm = i; 528 /* now remove the mappings */ 529 pmap_kremove(batch_vastart, va - batch_vastart); 530 /* and free the pages */ 531 for (i = 0; i < npgrm; i++) { 532 pg = PHYS_TO_VM_PAGE(pa[i]); 533 KASSERT(pg); 534 KASSERT(pg->uobject == NULL && pg->uanon == NULL); 535 KASSERT((pg->flags & PG_BUSY) == 0); 536 uvm_pagefree(pg); 537 } 538 } 539#undef __PGRM_BATCH 540} 541 542#if defined(DEBUG) 543void 544uvm_km_check_empty(struct vm_map *map, vaddr_t start, vaddr_t end) 545{ 546 vaddr_t va; 547 UVMHIST_FUNC(__func__); UVMHIST_CALLED(maphist); 548 549 KDASSERT(VM_MAP_IS_KERNEL(map)); 550 KDASSERT(vm_map_min(map) <= start); 551 KDASSERT(start < end); 552 KDASSERT(end <= vm_map_max(map)); 553 554 for (va = start; va < end; va += PAGE_SIZE) { 555 paddr_t pa; 556 557 if (pmap_extract(pmap_kernel(), va, &pa)) { 558 panic("uvm_km_check_empty: va %p has pa %#llx", 559 (void *)va, (long long)pa); 560 } 561 /* 562 * kernel_object should not have pages for the corresponding 563 * region. check it. 564 * 565 * why trylock? because: 566 * - caller might not want to block. 567 * - we can recurse when allocating radix_node for 568 * kernel_object. 569 */ 570 if (rw_tryenter(uvm_kernel_object->vmobjlock, RW_READER)) { 571 struct vm_page *pg; 572 573 pg = uvm_pagelookup(uvm_kernel_object, 574 va - vm_map_min(kernel_map)); 575 rw_exit(uvm_kernel_object->vmobjlock); 576 if (pg) { 577 panic("uvm_km_check_empty: " 578 "has page hashed at %p", 579 (const void *)va); 580 } 581 } 582 } 583} 584#endif /* defined(DEBUG) */ 585 586/* 587 * uvm_km_alloc: allocate an area of kernel memory. 588 * 589 * => NOTE: we can return 0 even if we can wait if there is not enough 590 * free VM space in the map... caller should be prepared to handle 591 * this case. 592 * => we return KVA of memory allocated 593 */ 594 595vaddr_t 596uvm_km_alloc(struct vm_map *map, vsize_t size, vsize_t align, uvm_flag_t flags) 597{ 598 vaddr_t kva, loopva; 599 vaddr_t offset; 600 vsize_t loopsize; 601 struct vm_page *pg; 602 struct uvm_object *obj; 603 int pgaflags; 604 vm_prot_t prot, vaprot; 605 UVMHIST_FUNC(__func__); UVMHIST_CALLED(maphist); 606 607 KASSERT(vm_map_pmap(map) == pmap_kernel()); 608 KASSERT((flags & UVM_KMF_TYPEMASK) == UVM_KMF_WIRED || 609 (flags & UVM_KMF_TYPEMASK) == UVM_KMF_PAGEABLE || 610 (flags & UVM_KMF_TYPEMASK) == UVM_KMF_VAONLY); 611 KASSERT((flags & UVM_KMF_VAONLY) != 0 || (flags & UVM_KMF_COLORMATCH) == 0); 612 KASSERT((flags & UVM_KMF_COLORMATCH) == 0 || (flags & UVM_KMF_VAONLY) != 0); 613 614 /* 615 * setup for call 616 */ 617 618 kva = vm_map_min(map); /* hint */ 619 size = round_page(size); 620 obj = (flags & UVM_KMF_PAGEABLE) ? 
uvm_kernel_object : NULL; 621 UVMHIST_LOG(maphist," (map=%#jx, obj=%#jx, size=%#jx, flags=%jd)", 622 (uintptr_t)map, (uintptr_t)obj, size, flags); 623 624 /* 625 * allocate some virtual space 626 */ 627 628 vaprot = (flags & UVM_KMF_EXEC) ? UVM_PROT_ALL : UVM_PROT_RW; 629 if (__predict_false(uvm_map(map, &kva, size, obj, UVM_UNKNOWN_OFFSET, 630 align, UVM_MAPFLAG(vaprot, UVM_PROT_ALL, UVM_INH_NONE, 631 UVM_ADV_RANDOM, 632 (flags & (UVM_KMF_TRYLOCK | UVM_KMF_NOWAIT | UVM_KMF_WAITVA 633 | UVM_KMF_COLORMATCH)))) != 0)) { 634 UVMHIST_LOG(maphist, "<- done (no VM)",0,0,0,0); 635 return(0); 636 } 637 638 /* 639 * if all we wanted was VA, return now 640 */ 641 642 if (flags & (UVM_KMF_VAONLY | UVM_KMF_PAGEABLE)) { 643 UVMHIST_LOG(maphist,"<- done valloc (kva=%#jx)", kva,0,0,0); 644 return(kva); 645 } 646 647 /* 648 * recover object offset from virtual address 649 */ 650 651 offset = kva - vm_map_min(kernel_map); 652 UVMHIST_LOG(maphist, " kva=%#jx, offset=%#jx", kva, offset,0,0); 653 654 /* 655 * now allocate and map in the memory... note that we are the only ones 656 * who should ever get a handle on this area of VM. 657 */ 658 659 loopva = kva; 660 loopsize = size; 661 662 pgaflags = UVM_FLAG_COLORMATCH; 663 if (flags & UVM_KMF_NOWAIT) 664 pgaflags |= UVM_PGA_USERESERVE; 665 if (flags & UVM_KMF_ZERO) 666 pgaflags |= UVM_PGA_ZERO; 667 prot = VM_PROT_READ | VM_PROT_WRITE; 668 if (flags & UVM_KMF_EXEC) 669 prot |= VM_PROT_EXECUTE; 670 while (loopsize) { 671 KASSERTMSG(!pmap_extract(pmap_kernel(), loopva, NULL), 672 "loopva=%#"PRIxVADDR, loopva); 673 674 pg = uvm_pagealloc_strat(NULL, offset, NULL, pgaflags, 675#ifdef UVM_KM_VMFREELIST 676 UVM_PGA_STRAT_ONLY, UVM_KM_VMFREELIST 677#else 678 UVM_PGA_STRAT_NORMAL, 0 679#endif 680 ); 681 682 /* 683 * out of memory? 684 */ 685 686 if (__predict_false(pg == NULL)) { 687 if ((flags & UVM_KMF_NOWAIT) || 688 ((flags & UVM_KMF_CANFAIL) && !uvm_reclaimable())) { 689 /* free everything!
*/ 690 uvm_km_free(map, kva, size, 691 flags & UVM_KMF_TYPEMASK); 692 return (0); 693 } else { 694 uvm_wait("km_getwait2"); /* sleep here */ 695 continue; 696 } 697 } 698 699 pg->flags &= ~PG_BUSY; /* new page */ 700 UVM_PAGE_OWN(pg, NULL); 701 702 /* 703 * map it in 704 */ 705 706 pmap_kenter_pa(loopva, VM_PAGE_TO_PHYS(pg), 707 prot, PMAP_KMPAGE); 708 loopva += PAGE_SIZE; 709 offset += PAGE_SIZE; 710 loopsize -= PAGE_SIZE; 711 } 712 713 pmap_update(pmap_kernel()); 714 715 if ((flags & UVM_KMF_ZERO) == 0) { 716 kmsan_orig((void *)kva, size, KMSAN_TYPE_UVM, __RET_ADDR); 717 kmsan_mark((void *)kva, size, KMSAN_STATE_UNINIT); 718 } 719 720 UVMHIST_LOG(maphist,"<- done (kva=%#jx)", kva,0,0,0); 721 return(kva); 722} 723 724/* 725 * uvm_km_protect: change the protection of an allocated area 726 */ 727 728int 729uvm_km_protect(struct vm_map *map, vaddr_t addr, vsize_t size, vm_prot_t prot) 730{ 731 return uvm_map_protect(map, addr, addr + round_page(size), prot, false); 732} 733 734/* 735 * uvm_km_free: free an area of kernel memory 736 */ 737 738void 739uvm_km_free(struct vm_map *map, vaddr_t addr, vsize_t size, uvm_flag_t flags) 740{ 741 UVMHIST_FUNC(__func__); UVMHIST_CALLED(maphist); 742 743 KASSERT((flags & UVM_KMF_TYPEMASK) == UVM_KMF_WIRED || 744 (flags & UVM_KMF_TYPEMASK) == UVM_KMF_PAGEABLE || 745 (flags & UVM_KMF_TYPEMASK) == UVM_KMF_VAONLY); 746 KASSERT((addr & PAGE_MASK) == 0); 747 KASSERT(vm_map_pmap(map) == pmap_kernel()); 748 749 size = round_page(size); 750 751 if (flags & UVM_KMF_PAGEABLE) { 752 uvm_km_pgremove(addr, addr + size); 753 } else if (flags & UVM_KMF_WIRED) { 754 /* 755 * Note: uvm_km_pgremove_intrsafe() extracts mapping, thus 756 * remove it after. See comment below about KVA visibility. 757 */ 758 uvm_km_pgremove_intrsafe(map, addr, addr + size); 759 } 760 761 /* 762 * Note: uvm_unmap_remove() calls pmap_update() for us, before 763 * KVA becomes globally available. 764 */ 765 766 uvm_unmap1(map, addr, addr + size, UVM_FLAG_VAONLY); 767} 768 769/* Sanity; must specify both or none. */ 770#if (defined(PMAP_MAP_POOLPAGE) || defined(PMAP_UNMAP_POOLPAGE)) && \ 771 (!defined(PMAP_MAP_POOLPAGE) || !defined(PMAP_UNMAP_POOLPAGE)) 772#error Must specify MAP and UNMAP together. 773#endif 774 775#if defined(PMAP_ALLOC_POOLPAGE) && \ 776 !defined(PMAP_MAP_POOLPAGE) && !defined(PMAP_UNMAP_POOLPAGE) 777#error Must specify ALLOC with MAP and UNMAP 778#endif 779 780int 781uvm_km_kmem_alloc(vmem_t *vm, vmem_size_t size, vm_flag_t flags, 782 vmem_addr_t *addr) 783{ 784 struct vm_page *pg; 785 vmem_addr_t va; 786 int rc; 787 vaddr_t loopva; 788 vsize_t loopsize; 789 790 size = round_page(size); 791 792#if defined(PMAP_MAP_POOLPAGE) 793 if (size == PAGE_SIZE) { 794again: 795#ifdef PMAP_ALLOC_POOLPAGE 796 pg = PMAP_ALLOC_POOLPAGE((flags & VM_SLEEP) ? 797 0 : UVM_PGA_USERESERVE); 798#else 799 pg = uvm_pagealloc(NULL, 0, NULL, 800 (flags & VM_SLEEP) ? 0 : UVM_PGA_USERESERVE); 801#endif /* PMAP_ALLOC_POOLPAGE */ 802 if (__predict_false(pg == NULL)) { 803 if (flags & VM_SLEEP) { 804 uvm_wait("plpg"); 805 goto again; 806 } 807 return ENOMEM; 808 } 809 va = PMAP_MAP_POOLPAGE(VM_PAGE_TO_PHYS(pg)); 810 KASSERT(va != 0); 811 *addr = va; 812 return 0; 813 } 814#endif /* PMAP_MAP_POOLPAGE */ 815 816 rc = vmem_alloc(vm, size, flags, &va); 817 if (rc != 0) 818 return rc; 819 820#ifdef PMAP_GROWKERNEL 821 /* 822 * These VA allocations happen independently of uvm_map 823 * so this allocation must not extend beyond the current limit. 
824 */ 825 KASSERTMSG(uvm_maxkaddr >= va + size, 826 "%#"PRIxVADDR" %#"PRIxPTR" %#zx", 827 uvm_maxkaddr, va, size); 828#endif 829 830 loopva = va; 831 loopsize = size; 832 833 while (loopsize) { 834 paddr_t pa __diagused; 835 KASSERTMSG(!pmap_extract(pmap_kernel(), loopva, &pa), 836 "loopva=%#"PRIxVADDR" loopsize=%#"PRIxVSIZE 837 " pa=%#"PRIxPADDR" vmem=%p", 838 loopva, loopsize, pa, vm); 839 840 pg = uvm_pagealloc(NULL, loopva, NULL, 841 UVM_FLAG_COLORMATCH 842 | ((flags & VM_SLEEP) ? 0 : UVM_PGA_USERESERVE)); 843 if (__predict_false(pg == NULL)) { 844 if (flags & VM_SLEEP) { 845 uvm_wait("plpg"); 846 continue; 847 } else { 848 uvm_km_pgremove_intrsafe(kernel_map, va, 849 va + size); 850 vmem_free(vm, va, size); 851 return ENOMEM; 852 } 853 } 854 855 pg->flags &= ~PG_BUSY; /* new page */ 856 UVM_PAGE_OWN(pg, NULL); 857 pmap_kenter_pa(loopva, VM_PAGE_TO_PHYS(pg), 858 VM_PROT_READ|VM_PROT_WRITE, PMAP_KMPAGE); 859 860 loopva += PAGE_SIZE; 861 loopsize -= PAGE_SIZE; 862 } 863 pmap_update(pmap_kernel()); 864 865 *addr = va; 866 867 return 0; 868} 869 870void 871uvm_km_kmem_free(vmem_t *vm, vmem_addr_t addr, size_t size) 872{ 873 874 size = round_page(size); 875#if defined(PMAP_UNMAP_POOLPAGE) 876 if (size == PAGE_SIZE) { 877 paddr_t pa; 878 879 pa = PMAP_UNMAP_POOLPAGE(addr); 880 uvm_pagefree(PHYS_TO_VM_PAGE(pa)); 881 return; 882 } 883#endif /* PMAP_UNMAP_POOLPAGE */ 884 uvm_km_pgremove_intrsafe(kernel_map, addr, addr + size); 885 pmap_update(pmap_kernel()); 886 887 vmem_free(vm, addr, size); 888} 889 890bool 891uvm_km_va_starved_p(void) 892{ 893 vmem_size_t total; 894 vmem_size_t free; 895 896 if (kmem_arena == NULL) 897 return false; 898 899 total = vmem_size(kmem_arena, VMEM_ALLOC|VMEM_FREE); 900 free = vmem_size(kmem_arena, VMEM_FREE); 901 902 return (free < (total / 10)); 903} 904
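For reference, a minimal usage sketch of the interfaces defined above. This is not part of uvm_km.c; the example_* names, the submap size, and the particular flag combinations are hypothetical, and the sketch assumes a normal kernel compilation environment where <uvm/uvm.h> provides the uvm_km_* prototypes.

/*
 * Illustrative sketch only: allocating/freeing wired kernel memory and
 * carving a submap out of kernel_map with the functions defined above.
 */
#include <sys/param.h>
#include <uvm/uvm.h>

static void *
example_wired_alloc(void)
{
	vaddr_t va;

	/*
	 * Request wired, zero-filled pages and allow sleeping for KVA.
	 * uvm_km_alloc() can still return 0, so the caller must check.
	 */
	va = uvm_km_alloc(kernel_map, PAGE_SIZE, 0,
	    UVM_KMF_WIRED | UVM_KMF_ZERO | UVM_KMF_WAITVA);
	return (va != 0) ? (void *)va : NULL;
}

static void
example_wired_free(void *p)
{
	/* The type flag must match the one used at allocation time. */
	uvm_km_free(kernel_map, (vaddr_t)p, PAGE_SIZE, UVM_KMF_WIRED);
}

static struct vm_map *example_submap;

static void
example_make_submap(void)
{
	vaddr_t vmin = 0, vmax;

	/*
	 * Reserve a pageable submap in kernel_map, similar to how the
	 * standard submaps are set up at boot; uvm_km_suballoc() panics
	 * rather than returning NULL if the parent map has no space.
	 */
	example_submap = uvm_km_suballoc(kernel_map, &vmin, &vmax,
	    16 * PAGE_SIZE, VM_MAP_PAGEABLE, false, NULL);
}

A pageable allocation would instead pass UVM_KMF_PAGEABLE (and free with the same type flag), and a VA-only reservation would use UVM_KMF_VAONLY, matching the assertions in uvm_km_alloc() and uvm_km_free() above.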