/*	$NetBSD: uvm_km.c,v 1.31 1999/07/22 22:58:38 thorpej Exp $	*/

/*
 * Copyright (c) 1997 Charles D. Cranor and Washington University.
 * Copyright (c) 1991, 1993, The Regents of the University of California.
 *
 * All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * The Mach Operating System project at Carnegie-Mellon University.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *      This product includes software developed by Charles D. Cranor,
 *      Washington University, the University of California, Berkeley and
 *      its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)vm_kern.c   8.3 (Berkeley) 1/12/94
 * from: Id: uvm_km.c,v 1.1.2.14 1998/02/06 05:19:27 chs Exp
 *
 *
 * Copyright (c) 1987, 1990 Carnegie-Mellon University.
 * All rights reserved.
 *
 * Permission to use, copy, modify and distribute this software and
 * its documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND
 * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie the
 * rights to redistribute these changes.
 */

#include "opt_uvmhist.h"
#include "opt_pmap_new.h"

/*
 * uvm_km.c: handle kernel memory allocation and management
 */

/*
 * overview of kernel memory management:
 *
 * the kernel virtual address space is mapped by "kernel_map."   kernel_map
 * starts at VM_MIN_KERNEL_ADDRESS and goes to VM_MAX_KERNEL_ADDRESS.
 * note that VM_MIN_KERNEL_ADDRESS is equal to vm_map_min(kernel_map).
 *
 * the kernel_map has several "submaps."   submaps can only appear in
 * the kernel_map (user processes can't use them).   submaps "take over"
 * the management of a sub-range of the kernel's address space.   submaps
 * are typically allocated at boot time and are never released.   kernel
 * virtual address space that is mapped by a submap is locked by the
 * submap's lock -- not the kernel_map's lock.
 *
 * thus, the useful feature of submaps is that they allow us to break
 * up the locking and protection of the kernel address space into smaller
 * chunks.
 *
 * the vm system has several standard kernel submaps, including:
 *   kmem_map => contains only wired kernel memory for the kernel
 *		malloc.   *** access to kmem_map must be protected
 *		by splimp() because we are allowed to call malloc()
 *		at interrupt time ***
 *   mb_map => memory for large mbufs,  *** protected by splimp ***
 *   pager_map => used to map "buf" structures into kernel space
 *   exec_map => used during exec to handle exec args
 *   etc...
 *
 * the kernel allocates its private memory out of special uvm_objects whose
 * reference count is set to UVM_OBJ_KERN (thus indicating that the objects
 * are "special" and never die).   all kernel objects should be thought of
 * as large, fixed-sized, sparsely populated uvm_objects.   each kernel
 * object is equal to the size of kernel virtual address space (i.e. the
 * value "VM_MAX_KERNEL_ADDRESS - VM_MIN_KERNEL_ADDRESS").
 *
 * most kernel private memory lives in kernel_object.   the only exception
 * to this is for memory that belongs to submaps that must be protected
 * by splimp().   each of these submaps has its own private kernel
 * object (e.g. kmem_object, mb_object).
 *
 * note that just because a kernel object spans the entire kernel virtual
 * address space doesn't mean that it has to be mapped into the entire space.
 * large chunks of a kernel object's space go unused either because
 * that area of kernel VM is unmapped, or there is some other type of
 * object mapped into that range (e.g. a vnode).    for submaps' kernel
 * objects, the only part of the object that can ever be populated is the
 * offsets that are managed by the submap.
 *
 * note that the "offset" in a kernel object is always the kernel virtual
 * address minus the VM_MIN_KERNEL_ADDRESS (aka vm_map_min(kernel_map)).
 * example:
 *   suppose VM_MIN_KERNEL_ADDRESS is 0xf8000000 and the kernel does a
 *   uvm_km_alloc(kernel_map, PAGE_SIZE) [allocate 1 wired down page in the
 *   kernel map].    if uvm_km_alloc returns virtual address 0xf8235000,
 *   then that means that the page at offset 0x235000 in kernel_object is
 *   mapped at 0xf8235000.
 *
 * note that the offsets in kmem_object and mb_object also follow this
 * rule.   this means that the offsets for kmem_object must fall in the
 * range of [vm_map_min(kmem_map) - vm_map_min(kernel_map)] to
 * [vm_map_max(kmem_map) - vm_map_min(kernel_map)], so the offsets
 * in those objects will typically not start at zero.
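 *
 * as a concrete sketch of that rule (the addresses here are made up
 * purely for illustration and are not taken from any real port): if
 * kmem_map covered the range [0xf8400000, 0xf8800000) inside a
 * kernel_map starting at 0xf8000000, the legal offsets into
 * kmem_object would be
 *
 *	0xf8400000 - 0xf8000000 = 0x400000	(lowest legal offset)
 *	0xf8800000 - 0xf8000000 = 0x800000	(end of the legal range)
 *
 * i.e. a page mapped at virtual address "kva" within kmem_map lives at
 * offset "kva - vm_map_min(kernel_map)" in kmem_object.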
 *
 * kernel objects have one other special property: when the kernel virtual
 * memory mapping them is unmapped, the backing memory in the object is
 * freed right away.   this is done with the uvm_km_pgremove() function.
 * this has to be done because there is no backing store for kernel pages
 * and no need to save them after they are no longer referenced.
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/proc.h>

#include <vm/vm.h>
#include <vm/vm_page.h>
#include <vm/vm_kern.h>

#include <uvm/uvm.h>

/*
 * global data structures
 */

vm_map_t kernel_map = NULL;

struct vmi_list vmi_list;
simple_lock_data_t vmi_list_slock;

/*
 * local data structures
 */

static struct vm_map		kernel_map_store;
static struct uvm_object	kmem_object_store;
static struct uvm_object	mb_object_store;

/*
 * All pager operations here are NULL, but the object must have
 * a pager ops vector associated with it; various places assume
 * it to be so.
 */
static struct uvm_pagerops km_pager;

/*
 * uvm_km_init: init kernel maps and objects to reflect reality (i.e.
 * KVM already allocated for text, data, bss, and static data structures).
 *
 * => KVM is defined by VM_MIN_KERNEL_ADDRESS/VM_MAX_KERNEL_ADDRESS.
 *    we assume that [min -> start] has already been allocated and that
 *    "end" is the end.
 */

void
uvm_km_init(start, end)
        vaddr_t start, end;
{
        vaddr_t base = VM_MIN_KERNEL_ADDRESS;

        /*
         * first, initialize the interrupt-safe map list.
         */
        LIST_INIT(&vmi_list);
        simple_lock_init(&vmi_list_slock);

        /*
         * next, init kernel memory objects.
         */

        /* kernel_object: for pageable anonymous kernel memory */
        uvm.kernel_object = uao_create(VM_MAX_KERNEL_ADDRESS -
            VM_MIN_KERNEL_ADDRESS, UAO_FLAG_KERNOBJ);

        /*
         * kmem_object: for use by the kernel malloc().  Memory is always
         * wired, and this object (and the kmem_map) can be accessed at
         * interrupt time.
         */
        simple_lock_init(&kmem_object_store.vmobjlock);
        kmem_object_store.pgops = &km_pager;
        TAILQ_INIT(&kmem_object_store.memq);
        kmem_object_store.uo_npages = 0;
        /* we are special.  we never die */
        kmem_object_store.uo_refs = UVM_OBJ_KERN_INTRSAFE;
        uvmexp.kmem_object = &kmem_object_store;

        /*
         * mb_object: for mbuf cluster pages on platforms which use the
         * mb_map.  Memory is always wired, and this object (and the mb_map)
         * can be accessed at interrupt time.
         */
        simple_lock_init(&mb_object_store.vmobjlock);
        mb_object_store.pgops = &km_pager;
        TAILQ_INIT(&mb_object_store.memq);
        mb_object_store.uo_npages = 0;
        /* we are special.  we never die */
        mb_object_store.uo_refs = UVM_OBJ_KERN_INTRSAFE;
        uvmexp.mb_object = &mb_object_store;

        /*
         * init the map and reserve already allocated kernel space
         * before installing.
         */

        uvm_map_setup(&kernel_map_store, base, end, VM_MAP_PAGEABLE);
        kernel_map_store.pmap = pmap_kernel();
        if (uvm_map(&kernel_map_store, &base, start - base, NULL,
            UVM_UNKNOWN_OFFSET, UVM_MAPFLAG(UVM_PROT_ALL, UVM_PROT_ALL,
            UVM_INH_NONE, UVM_ADV_RANDOM, UVM_FLAG_FIXED)) != KERN_SUCCESS)
                panic("uvm_km_init: could not reserve space for kernel");

        /*
         * install!
         */

        kernel_map = &kernel_map_store;
}

/*
 * uvm_km_suballoc: allocate a submap in the kernel map.   once a submap
 * is allocated all references to that area of VM must go through it.  this
 * allows the locking of VAs in kernel_map to be broken up into regions.
 *
 * => if `fixed' is true, *min specifies where the region described
 *      by the submap must start
 * => if submap is non NULL we use that as the submap, otherwise we
 *      alloc a new map
 */
struct vm_map *
uvm_km_suballoc(map, min, max, size, flags, fixed, submap)
        struct vm_map *map;
        vaddr_t *min, *max;             /* OUT, OUT */
        vsize_t size;
        int flags;
        boolean_t fixed;
        struct vm_map *submap;
{
        int mapflags = UVM_FLAG_NOMERGE | (fixed ? UVM_FLAG_FIXED : 0);

        size = round_page(size);        /* round up to pagesize */

        /*
         * first allocate a blank spot in the parent map
         */

        if (uvm_map(map, min, size, NULL, UVM_UNKNOWN_OFFSET,
            UVM_MAPFLAG(UVM_PROT_ALL, UVM_PROT_ALL, UVM_INH_NONE,
            UVM_ADV_RANDOM, mapflags)) != KERN_SUCCESS) {
                panic("uvm_km_suballoc: unable to allocate space in parent map");
        }

        /*
         * set VM bounds (min is filled in by uvm_map)
         */

        *max = *min + size;

        /*
         * add references to pmap and create or init the submap
         */

        pmap_reference(vm_map_pmap(map));
        if (submap == NULL) {
                submap = uvm_map_create(vm_map_pmap(map), *min, *max, flags);
                if (submap == NULL)
                        panic("uvm_km_suballoc: unable to create submap");
        } else {
                uvm_map_setup(submap, *min, *max, flags);
                submap->pmap = vm_map_pmap(map);
        }

        /*
         * now let uvm_map_submap plug it in...
         */

        if (uvm_map_submap(map, *min, *max, submap) != KERN_SUCCESS)
                panic("uvm_km_suballoc: submap allocation failed");

        return(submap);
}

/*
 * uvm_km_pgremove: remove pages from a kernel uvm_object.
 *
 * => when you unmap a part of anonymous kernel memory you want to toss
 *    the pages right away.    (this gets called from uvm_unmap_...).
 */

#define UKM_HASH_PENALTY 4      /* a guess */

void
uvm_km_pgremove(uobj, start, end)
        struct uvm_object *uobj;
        vaddr_t start, end;
{
        boolean_t by_list;
        struct vm_page *pp, *ppnext;
        vaddr_t curoff;
        UVMHIST_FUNC("uvm_km_pgremove"); UVMHIST_CALLED(maphist);

        simple_lock(&uobj->vmobjlock);          /* lock object */

#ifdef DIAGNOSTIC
        if (uobj->pgops != &aobj_pager)
                panic("uvm_km_pgremove: object %p not an aobj", uobj);
#endif

        /* choose cheapest traversal */
        by_list = (uobj->uo_npages <=
            ((end - start) >> PAGE_SHIFT) * UKM_HASH_PENALTY);

        if (by_list)
                goto loop_by_list;

        /* by hash */

        for (curoff = start ; curoff < end ; curoff += PAGE_SIZE) {
                pp = uvm_pagelookup(uobj, curoff);
                if (pp == NULL)
                        continue;

                UVMHIST_LOG(maphist,"  page 0x%x, busy=%d", pp,
                    pp->flags & PG_BUSY, 0, 0);

                /* now do the actual work */
                if (pp->flags & PG_BUSY) {
                        /* owner must check for this when done */
                        pp->flags |= PG_RELEASED;
                } else {
                        /* free the swap slot... */
                        uao_dropswap(uobj, curoff >> PAGE_SHIFT);

                        /*
                         * ...and free the page; note it may be on the
                         * active or inactive queues.
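                         * (uvm_pagefree() dequeues the page itself; we
                         * only need to be holding the page queue lock,
                         * taken just below, while it does so.)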
                         */
                        uvm_lock_pageq();
                        uvm_pagefree(pp);
                        uvm_unlock_pageq();
                }
                /* done */
        }
        simple_unlock(&uobj->vmobjlock);
        return;

loop_by_list:

        for (pp = uobj->memq.tqh_first ; pp != NULL ; pp = ppnext) {
                ppnext = pp->listq.tqe_next;
                if (pp->offset < start || pp->offset >= end) {
                        continue;
                }

                UVMHIST_LOG(maphist,"  page 0x%x, busy=%d", pp,
                    pp->flags & PG_BUSY, 0, 0);

                /* now do the actual work */
                if (pp->flags & PG_BUSY) {
                        /* owner must check for this when done */
                        pp->flags |= PG_RELEASED;
                } else {
                        /* free the swap slot... */
                        uao_dropswap(uobj, pp->offset >> PAGE_SHIFT);

                        /*
                         * ...and free the page; note it may be on the
                         * active or inactive queues.
                         */
                        uvm_lock_pageq();
                        uvm_pagefree(pp);
                        uvm_unlock_pageq();
                }
                /* done */
        }
        simple_unlock(&uobj->vmobjlock);
        return;
}


/*
 * uvm_km_pgremove_intrsafe: like uvm_km_pgremove(), but for "intrsafe"
 *    objects
 *
 * => when you unmap a part of anonymous kernel memory you want to toss
 *    the pages right away.    (this gets called from uvm_unmap_...).
 * => none of the pages will ever be busy, and none of them will ever
 *    be on the active or inactive queues (because these objects are
 *    never allowed to "page").
 */

void
uvm_km_pgremove_intrsafe(uobj, start, end)
        struct uvm_object *uobj;
        vaddr_t start, end;
{
        boolean_t by_list;
        struct vm_page *pp, *ppnext;
        vaddr_t curoff;
        UVMHIST_FUNC("uvm_km_pgremove_intrsafe"); UVMHIST_CALLED(maphist);

        simple_lock(&uobj->vmobjlock);          /* lock object */

#ifdef DIAGNOSTIC
        if (UVM_OBJ_IS_INTRSAFE_OBJECT(uobj) == 0)
                panic("uvm_km_pgremove_intrsafe: object %p not intrsafe", uobj);
#endif

        /* choose cheapest traversal */
        by_list = (uobj->uo_npages <=
            ((end - start) >> PAGE_SHIFT) * UKM_HASH_PENALTY);

        if (by_list)
                goto loop_by_list;

        /* by hash */

        for (curoff = start ; curoff < end ; curoff += PAGE_SIZE) {
                pp = uvm_pagelookup(uobj, curoff);
                if (pp == NULL)
                        continue;

                UVMHIST_LOG(maphist,"  page 0x%x, busy=%d", pp,
                    pp->flags & PG_BUSY, 0, 0);
#ifdef DIAGNOSTIC
                if (pp->flags & PG_BUSY)
                        panic("uvm_km_pgremove_intrsafe: busy page");
                if (pp->pqflags & PQ_ACTIVE)
                        panic("uvm_km_pgremove_intrsafe: active page");
                if (pp->pqflags & PQ_INACTIVE)
                        panic("uvm_km_pgremove_intrsafe: inactive page");
#endif

                /* free the page */
                uvm_pagefree(pp);
        }
        simple_unlock(&uobj->vmobjlock);
        return;

loop_by_list:

        for (pp = uobj->memq.tqh_first ; pp != NULL ; pp = ppnext) {
                ppnext = pp->listq.tqe_next;
                if (pp->offset < start || pp->offset >= end) {
                        continue;
                }

                UVMHIST_LOG(maphist,"  page 0x%x, busy=%d", pp,
                    pp->flags & PG_BUSY, 0, 0);

#ifdef DIAGNOSTIC
                if (pp->flags & PG_BUSY)
                        panic("uvm_km_pgremove_intrsafe: busy page");
                if (pp->pqflags & PQ_ACTIVE)
                        panic("uvm_km_pgremove_intrsafe: active page");
                if (pp->pqflags & PQ_INACTIVE)
                        panic("uvm_km_pgremove_intrsafe: inactive page");
#endif

                /* free the page */
                uvm_pagefree(pp);
        }
        simple_unlock(&uobj->vmobjlock);
        return;
}


/*
 * uvm_km_kmemalloc: lower level kernel memory allocator for malloc()
 *
 * => we map wired memory into the specified map using the obj passed in
 * => NOTE: we can return 0 even if we can wait if there is not enough
 *      free VM space in the map... caller should be prepared to handle
 *      this case.
 * => we return KVA of memory allocated
 * => flags: NOWAIT, VALLOC - just allocate VA, TRYLOCK - fail if we can't
 *      lock the map
 */

vaddr_t
uvm_km_kmemalloc(map, obj, size, flags)
        vm_map_t map;
        struct uvm_object *obj;
        vsize_t size;
        int flags;
{
        vaddr_t kva, loopva;
        vaddr_t offset;
        struct vm_page *pg;
        UVMHIST_FUNC("uvm_km_kmemalloc"); UVMHIST_CALLED(maphist);


        UVMHIST_LOG(maphist,"  (map=0x%x, obj=0x%x, size=0x%x, flags=%d)",
            map, obj, size, flags);
#ifdef DIAGNOSTIC
        /* sanity check */
        if (vm_map_pmap(map) != pmap_kernel())
                panic("uvm_km_kmemalloc: invalid map");
#endif

        /*
         * setup for call
         */

        size = round_page(size);
        kva = vm_map_min(map);  /* hint */

        /*
         * allocate some virtual space
         */

        if (uvm_map(map, &kva, size, obj, UVM_UNKNOWN_OFFSET,
            UVM_MAPFLAG(UVM_PROT_ALL, UVM_PROT_ALL, UVM_INH_NONE,
            UVM_ADV_RANDOM, (flags & UVM_KMF_TRYLOCK)))
            != KERN_SUCCESS) {
                UVMHIST_LOG(maphist, "<- done (no VM)",0,0,0,0);
                return(0);
        }

        /*
         * if all we wanted was VA, return now
         */

        if (flags & UVM_KMF_VALLOC) {
                UVMHIST_LOG(maphist,"<- done valloc (kva=0x%x)", kva,0,0,0);
                return(kva);
        }
        /*
         * recover object offset from virtual address
         */

        offset = kva - vm_map_min(kernel_map);
        UVMHIST_LOG(maphist, "  kva=0x%x, offset=0x%x", kva, offset,0,0);

        /*
         * now allocate and map in the memory... note that we are the only ones
         * who should ever get a handle on this area of VM.
         */

        loopva = kva;
        while (size) {
                simple_lock(&obj->vmobjlock);
                pg = uvm_pagealloc(obj, offset, NULL, 0);
                if (pg) {
                        pg->flags &= ~PG_BUSY;  /* new page */
                        UVM_PAGE_OWN(pg, NULL);
                }
                simple_unlock(&obj->vmobjlock);

                /*
                 * out of memory?
                 */

                if (pg == NULL) {
                        if (flags & UVM_KMF_NOWAIT) {
                                /* free everything! */
                                uvm_unmap(map, kva, kva + size);
                                return(0);
                        } else {
                                uvm_wait("km_getwait2");        /* sleep here */
                                continue;
                        }
                }

                /*
                 * map it in: note that we call pmap_enter with the map and
                 * object unlocked in case we are kmem_map/kmem_object
                 * (because if pmap_enter wants to allocate out of kmem_object
                 * it will need to lock it itself!)
                 */
                if (UVM_OBJ_IS_INTRSAFE_OBJECT(obj)) {
#if defined(PMAP_NEW)
                        pmap_kenter_pa(loopva, VM_PAGE_TO_PHYS(pg),
                            VM_PROT_ALL);
#else
                        pmap_enter(map->pmap, loopva, VM_PAGE_TO_PHYS(pg),
                            UVM_PROT_ALL, TRUE, VM_PROT_READ|VM_PROT_WRITE);
#endif
                } else {
                        pmap_enter(map->pmap, loopva, VM_PAGE_TO_PHYS(pg),
                            UVM_PROT_ALL, TRUE, VM_PROT_READ|VM_PROT_WRITE);
                }
                loopva += PAGE_SIZE;
                offset += PAGE_SIZE;
                size -= PAGE_SIZE;
        }

        UVMHIST_LOG(maphist,"<- done (kva=0x%x)", kva,0,0,0);
        return(kva);
}

/*
 * uvm_km_free: free an area of kernel memory
 */

void
uvm_km_free(map, addr, size)
        vm_map_t map;
        vaddr_t addr;
        vsize_t size;
{

        uvm_unmap(map, trunc_page(addr), round_page(addr+size));
}

/*
 * uvm_km_free_wakeup: free an area of kernel memory and wake up
 * anyone waiting for vm space.
 *
 * => XXX: "wanted" bit + unlock&wait on other end?
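 * => the wakeup(map) below pairs with the tsleep((caddr_t)map, ...) in
 *      uvm_km_valloc_wait().  a sketch of the intended pairing (purely
 *      illustrative; "map" is the same kernel submap in both threads):
 *
 *      thread A:  kva = uvm_km_valloc_wait(map, size);   [may sleep]
 *      thread B:  uvm_km_free_wakeup(map, addr, size);   [wakes A]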
 */

void
uvm_km_free_wakeup(map, addr, size)
        vm_map_t map;
        vaddr_t addr;
        vsize_t size;
{
        vm_map_entry_t dead_entries;

        vm_map_lock(map);
        (void)uvm_unmap_remove(map, trunc_page(addr), round_page(addr+size),
            &dead_entries);
        wakeup(map);
        vm_map_unlock(map);

        if (dead_entries != NULL)
                uvm_unmap_detach(dead_entries, 0);
}

/*
 * uvm_km_alloc1: allocate wired down memory in the kernel map.
 *
 * => we can sleep if needed
 */

vaddr_t
uvm_km_alloc1(map, size, zeroit)
        vm_map_t map;
        vsize_t size;
        boolean_t zeroit;
{
        vaddr_t kva, loopva, offset;
        struct vm_page *pg;
        UVMHIST_FUNC("uvm_km_alloc1"); UVMHIST_CALLED(maphist);

        UVMHIST_LOG(maphist,"(map=0x%x, size=0x%x)", map, size,0,0);

#ifdef DIAGNOSTIC
        if (vm_map_pmap(map) != pmap_kernel())
                panic("uvm_km_alloc1");
#endif

        size = round_page(size);
        kva = vm_map_min(map);          /* hint */

        /*
         * allocate some virtual space
         */

        if (uvm_map(map, &kva, size, uvm.kernel_object, UVM_UNKNOWN_OFFSET,
            UVM_MAPFLAG(UVM_PROT_ALL, UVM_PROT_ALL, UVM_INH_NONE,
            UVM_ADV_RANDOM, 0)) != KERN_SUCCESS) {
                UVMHIST_LOG(maphist,"<- done (no VM)",0,0,0,0);
                return(0);
        }

        /*
         * recover object offset from virtual address
         */

        offset = kva - vm_map_min(kernel_map);
        UVMHIST_LOG(maphist,"  kva=0x%x, offset=0x%x", kva, offset,0,0);

        /*
         * now allocate the memory.  we must be careful about released pages.
         */

        loopva = kva;
        while (size) {
                simple_lock(&uvm.kernel_object->vmobjlock);
                pg = uvm_pagelookup(uvm.kernel_object, offset);

                /*
                 * if we found a page in an unallocated region, it must be
                 * released
                 */
                if (pg) {
                        if ((pg->flags & PG_RELEASED) == 0)
                                panic("uvm_km_alloc1: non-released page");
                        pg->flags |= PG_WANTED;
                        UVM_UNLOCK_AND_WAIT(pg, &uvm.kernel_object->vmobjlock,
                            FALSE, "km_alloc", 0);
                        continue;   /* retry */
                }

                /* allocate ram */
                pg = uvm_pagealloc(uvm.kernel_object, offset, NULL, 0);
                if (pg) {
                        pg->flags &= ~PG_BUSY;  /* new page */
                        UVM_PAGE_OWN(pg, NULL);
                }
                simple_unlock(&uvm.kernel_object->vmobjlock);
                if (pg == NULL) {
                        uvm_wait("km_alloc1w"); /* wait for memory */
                        continue;
                }

                /*
                 * map it in; note we're never called with an intrsafe
                 * object, so we always use regular old pmap_enter().
                 */
                pmap_enter(map->pmap, loopva, VM_PAGE_TO_PHYS(pg),
                    UVM_PROT_ALL, TRUE, VM_PROT_READ|VM_PROT_WRITE);

                loopva += PAGE_SIZE;
                offset += PAGE_SIZE;
                size -= PAGE_SIZE;
        }

        /*
         * zero on request (note that "size" is now zero due to the above loop
         * so we need to subtract kva from loopva to reconstruct the size).
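         * for example, after mapping a single page above, loopva has
         * advanced to kva + PAGE_SIZE, so "loopva - kva" is exactly the
         * rounded-up size that the memset below must clear.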
         */

        if (zeroit)
                memset((caddr_t)kva, 0, loopva - kva);

        UVMHIST_LOG(maphist,"<- done (kva=0x%x)", kva,0,0,0);
        return(kva);
}

/*
 * uvm_km_valloc: allocate zero-fill memory in the kernel's address space
 *
 * => memory is not allocated until fault time
 */

vaddr_t
uvm_km_valloc(map, size)
        vm_map_t map;
        vsize_t size;
{
        vaddr_t kva;
        UVMHIST_FUNC("uvm_km_valloc"); UVMHIST_CALLED(maphist);

        UVMHIST_LOG(maphist, "(map=0x%x, size=0x%x)", map, size, 0,0);

#ifdef DIAGNOSTIC
        if (vm_map_pmap(map) != pmap_kernel())
                panic("uvm_km_valloc");
#endif

        size = round_page(size);
        kva = vm_map_min(map);          /* hint */

        /*
         * allocate some virtual space.  will be demand filled by kernel_object.
         */

        if (uvm_map(map, &kva, size, uvm.kernel_object, UVM_UNKNOWN_OFFSET,
            UVM_MAPFLAG(UVM_PROT_ALL, UVM_PROT_ALL, UVM_INH_NONE,
            UVM_ADV_RANDOM, 0)) != KERN_SUCCESS) {
                UVMHIST_LOG(maphist, "<- done (no VM)", 0,0,0,0);
                return(0);
        }

        UVMHIST_LOG(maphist, "<- done (kva=0x%x)", kva,0,0,0);
        return(kva);
}

/*
 * uvm_km_valloc_wait: allocate zero-fill memory in the kernel's address space
 *
 * => memory is not allocated until fault time
 * => if no room in map, wait for space to free, unless requested size
 *    is larger than map (in which case we return 0)
 */

vaddr_t
uvm_km_valloc_wait(map, size)
        vm_map_t map;
        vsize_t size;
{
        vaddr_t kva;
        UVMHIST_FUNC("uvm_km_valloc_wait"); UVMHIST_CALLED(maphist);

        UVMHIST_LOG(maphist, "(map=0x%x, size=0x%x)", map, size, 0,0);

#ifdef DIAGNOSTIC
        if (vm_map_pmap(map) != pmap_kernel())
                panic("uvm_km_valloc_wait");
#endif

        size = round_page(size);
        if (size > vm_map_max(map) - vm_map_min(map))
                return(0);

        while (1) {
                kva = vm_map_min(map);          /* hint */

                /*
                 * allocate some virtual space.   will be demand filled
                 * by kernel_object.
                 */

                if (uvm_map(map, &kva, size, uvm.kernel_object,
                    UVM_UNKNOWN_OFFSET, UVM_MAPFLAG(UVM_PROT_ALL,
                    UVM_PROT_ALL, UVM_INH_NONE, UVM_ADV_RANDOM, 0))
                    == KERN_SUCCESS) {
                        UVMHIST_LOG(maphist,"<- done (kva=0x%x)", kva,0,0,0);
                        return(kva);
                }

                /*
                 * failed.  sleep for a while (on map)
                 */

                UVMHIST_LOG(maphist,"<<<sleeping>>>",0,0,0,0);
                tsleep((caddr_t)map, PVM, "vallocwait", 0);
        }
        /*NOTREACHED*/
}

/* Sanity; must specify both or none. */
#if (defined(PMAP_MAP_POOLPAGE) || defined(PMAP_UNMAP_POOLPAGE)) && \
    (!defined(PMAP_MAP_POOLPAGE) || !defined(PMAP_UNMAP_POOLPAGE))
#error Must specify MAP and UNMAP together.
#endif

/*
 * uvm_km_alloc_poolpage: allocate a page for the pool allocator
 *
 * => if the pmap specifies an alternate mapping method, we use it.
 */

/* ARGSUSED */
vaddr_t
uvm_km_alloc_poolpage1(map, obj, waitok)
        vm_map_t map;
        struct uvm_object *obj;
        boolean_t waitok;
{
#if defined(PMAP_MAP_POOLPAGE)
        struct vm_page *pg;
        vaddr_t va;

 again:
        pg = uvm_pagealloc(NULL, 0, NULL, UVM_PGA_USERESERVE);
        if (pg == NULL) {
                if (waitok) {
                        uvm_wait("plpg");
                        goto again;
                } else
                        return (0);
        }
        va = PMAP_MAP_POOLPAGE(VM_PAGE_TO_PHYS(pg));
        if (va == 0)
                uvm_pagefree(pg);
        return (va);
#else
        vaddr_t va;
        int s;

        /*
         * NOTE: We may be called with a map that doesn't require splimp
         * protection (e.g. kernel_map).  However, it does not hurt to
         * go to splimp in this case (since unprotected maps will never be
         * accessed in interrupt context).
         *
         * XXX We may want to consider changing the interface to this
         * XXX function.
         */

        s = splimp();
        va = uvm_km_kmemalloc(map, obj, PAGE_SIZE, waitok ? 0 : UVM_KMF_NOWAIT);
        splx(s);
        return (va);
#endif /* PMAP_MAP_POOLPAGE */
}

/*
 * uvm_km_free_poolpage: free a previously allocated pool page
 *
 * => if the pmap specifies an alternate unmapping method, we use it.
 */

/* ARGSUSED */
void
uvm_km_free_poolpage1(map, addr)
        vm_map_t map;
        vaddr_t addr;
{
#if defined(PMAP_UNMAP_POOLPAGE)
        paddr_t pa;

        pa = PMAP_UNMAP_POOLPAGE(addr);
        uvm_pagefree(PHYS_TO_VM_PAGE(pa));
#else
        int s;

        /*
         * NOTE: We may be called with a map that doesn't require splimp
         * protection (e.g. kernel_map).  However, it does not hurt to
         * go to splimp in this case (since unprotected maps will never be
         * accessed in interrupt context).
         *
         * XXX We may want to consider changing the interface to this
         * XXX function.
         */

        s = splimp();
        uvm_km_free(map, addr, PAGE_SIZE);
        splx(s);
#endif /* PMAP_UNMAP_POOLPAGE */
}
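
/*
 * example of how the allocators above pair up (an illustrative sketch
 * only -- the size, the map, and the error handling here are made up,
 * not taken from any real caller):
 *
 *	vaddr_t va;
 *
 *	va = uvm_km_alloc1(kernel_map, 4 * PAGE_SIZE, TRUE);
 *	if (va == 0)
 *		return (ENOMEM);	[no KVA was available]
 *
 *	[use the four zeroed, wired pages at "va", then release the
 *	 pages and the KVA together:]
 *
 *	uvm_km_free(kernel_map, va, 4 * PAGE_SIZE);
 */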