uvm_km.c revision 1.32
/*	$NetBSD: uvm_km.c,v 1.32 1999/09/12 01:17:36 chs Exp $	*/

/*
 * Copyright (c) 1997 Charles D. Cranor and Washington University.
 * Copyright (c) 1991, 1993, The Regents of the University of California.
 *
 * All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * The Mach Operating System project at Carnegie-Mellon University.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *      This product includes software developed by Charles D. Cranor,
 *      Washington University, the University of California, Berkeley and
 *      its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)vm_kern.c   8.3 (Berkeley) 1/12/94
 * from: Id: uvm_km.c,v 1.1.2.14 1998/02/06 05:19:27 chs Exp
 *
 *
 * Copyright (c) 1987, 1990 Carnegie-Mellon University.
 * All rights reserved.
 *
 * Permission to use, copy, modify and distribute this software and
 * its documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND
 * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie the
 * rights to redistribute these changes.
 */

#include "opt_uvmhist.h"

/*
 * uvm_km.c: handle kernel memory allocation and management
 */

/*
 * overview of kernel memory management:
 *
 * the kernel virtual address space is mapped by "kernel_map."  kernel_map
 * starts at VM_MIN_KERNEL_ADDRESS and goes to VM_MAX_KERNEL_ADDRESS.
 * note that VM_MIN_KERNEL_ADDRESS is equal to vm_map_min(kernel_map).
 *
 * the kernel_map has several "submaps."  submaps can only appear in
 * the kernel_map (user processes can't use them).  submaps "take over"
 * the management of a sub-range of the kernel's address space.  submaps
 * are typically allocated at boot time and are never released.  kernel
 * virtual address space that is mapped by a submap is locked by the
 * submap's lock -- not the kernel_map's lock.
 *
 * thus, the useful feature of submaps is that they allow us to break
 * up the locking and protection of the kernel address space into smaller
 * chunks.
 *
 * the vm system has several standard kernel submaps, including:
 *   kmem_map => contains only wired kernel memory for the kernel
 *		malloc.  *** access to kmem_map must be protected
 *		by splimp() because we are allowed to call malloc()
 *		at interrupt time ***
 *   mb_map => memory for large mbufs,  *** protected by splimp ***
 *   pager_map => used to map "buf" structures into kernel space
 *   exec_map => used during exec to handle exec args
 *   etc...
 *
 * the kernel allocates its private memory out of special uvm_objects whose
 * reference count is set to UVM_OBJ_KERN (thus indicating that the objects
 * are "special" and never die).  all kernel objects should be thought of
 * as large, fixed-sized, sparsely populated uvm_objects.  each kernel
 * object is equal in size to the kernel virtual address space (i.e. the
 * value "VM_MAX_KERNEL_ADDRESS - VM_MIN_KERNEL_ADDRESS").
 *
 * most kernel private memory lives in kernel_object.  the only exception
 * to this is for memory that belongs to submaps that must be protected
 * by splimp().  each of these submaps has its own private kernel
 * object (e.g. kmem_object, mb_object).
 *
 * note that just because a kernel object spans the entire kernel virtual
 * address space doesn't mean that it has to be mapped into the entire space.
 * large chunks of a kernel object's space go unused either because
 * that area of kernel VM is unmapped, or there is some other type of
 * object mapped into that range (e.g. a vnode).  for submaps' kernel
 * objects, the only part of the object that can ever be populated is the
 * offsets that are managed by the submap.
 *
 * note that the "offset" in a kernel object is always the kernel virtual
 * address minus the VM_MIN_KERNEL_ADDRESS (aka vm_map_min(kernel_map)).
 * example:
 *   suppose VM_MIN_KERNEL_ADDRESS is 0xf8000000 and the kernel does a
 *   uvm_km_alloc(kernel_map, PAGE_SIZE) [allocate 1 wired down page in the
 *   kernel map].  if uvm_km_alloc returns virtual address 0xf8235000,
 *   then that means that the page at offset 0x235000 in kernel_object is
 *   mapped at 0xf8235000.
 *
 * note that the offsets in kmem_object and mb_object also follow this
 * rule.  this means that the offsets for kmem_object must fall in the
 * range of [vm_map_min(kmem_map) - vm_map_min(kernel_map)] to
 * [vm_map_max(kmem_map) - vm_map_min(kernel_map)], so the offsets
 * in those objects will typically not start at zero.
 *
 * kernel objects have one other special property: when the kernel virtual
 * memory mapping them is unmapped, the backing memory in the object is
 * freed right away.  this is done with the uvm_km_pgremove() function.
 * this has to be done because there is no backing store for kernel pages
 * and no need to save them after they are no longer referenced.
 */
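
/*
 * illustrative sketch (not part of this file): the offset rule above
 * boils down to the following computation, which the allocators below
 * perform after reserving virtual space.  "kva" here stands for a
 * hypothetical address handed back by uvm_map():
 *
 *	vaddr_t kva, offset;
 *	struct vm_page *pg;
 *
 *	offset = kva - vm_map_min(kernel_map);	   // object offset of kva
 *	pg = uvm_pagealloc(uobj, offset, NULL, 0); // page at that offset
 *
 * e.g. with vm_map_min(kernel_map) == 0xf8000000 and kva == 0xf8235000,
 * offset is 0x235000, matching the worked example above.
 */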

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/proc.h>

#include <vm/vm.h>
#include <vm/vm_page.h>
#include <vm/vm_kern.h>

#include <uvm/uvm.h>

/*
 * global data structures
 */

vm_map_t kernel_map = NULL;

struct vmi_list vmi_list;
simple_lock_data_t vmi_list_slock;

/*
 * local data structures
 */

static struct vm_map		kernel_map_store;
static struct uvm_object	kmem_object_store;
static struct uvm_object	mb_object_store;

/*
 * All pager operations here are NULL, but the object must have
 * a pager ops vector associated with it; various places assume
 * it to be so.
 */
static struct uvm_pagerops	km_pager;

/*
 * uvm_km_init: init kernel maps and objects to reflect reality (i.e.
 * KVM already allocated for text, data, bss, and static data structures).
 *
 * => KVM is defined by VM_MIN_KERNEL_ADDRESS/VM_MAX_KERNEL_ADDRESS.
 *    we assume that [min -> start] has already been allocated and that
 *    "end" is the end.
 */

void
uvm_km_init(start, end)
        vaddr_t start, end;
{
        vaddr_t base = VM_MIN_KERNEL_ADDRESS;

        /*
         * first, initialize the interrupt-safe map list.
         */
        LIST_INIT(&vmi_list);
        simple_lock_init(&vmi_list_slock);

        /*
         * next, init kernel memory objects.
         */

        /* kernel_object: for pageable anonymous kernel memory */
        uvm.kernel_object = uao_create(VM_MAX_KERNEL_ADDRESS -
            VM_MIN_KERNEL_ADDRESS, UAO_FLAG_KERNOBJ);

        /*
         * kmem_object: for use by the kernel malloc().  Memory is always
         * wired, and this object (and the kmem_map) can be accessed at
         * interrupt time.
         */
        simple_lock_init(&kmem_object_store.vmobjlock);
        kmem_object_store.pgops = &km_pager;
        TAILQ_INIT(&kmem_object_store.memq);
        kmem_object_store.uo_npages = 0;
        /* we are special.  we never die */
        kmem_object_store.uo_refs = UVM_OBJ_KERN_INTRSAFE;
        uvmexp.kmem_object = &kmem_object_store;

        /*
         * mb_object: for mbuf cluster pages on platforms which use the
         * mb_map.  Memory is always wired, and this object (and the mb_map)
         * can be accessed at interrupt time.
         */
        simple_lock_init(&mb_object_store.vmobjlock);
        mb_object_store.pgops = &km_pager;
        TAILQ_INIT(&mb_object_store.memq);
        mb_object_store.uo_npages = 0;
        /* we are special.  we never die */
        mb_object_store.uo_refs = UVM_OBJ_KERN_INTRSAFE;
        uvmexp.mb_object = &mb_object_store;

        /*
         * init the map and reserve already allocated kernel space
         * before installing.
         */

        uvm_map_setup(&kernel_map_store, base, end, VM_MAP_PAGEABLE);
        kernel_map_store.pmap = pmap_kernel();
        if (uvm_map(&kernel_map_store, &base, start - base, NULL,
            UVM_UNKNOWN_OFFSET, UVM_MAPFLAG(UVM_PROT_ALL, UVM_PROT_ALL,
            UVM_INH_NONE, UVM_ADV_RANDOM, UVM_FLAG_FIXED)) != KERN_SUCCESS)
                panic("uvm_km_init: could not reserve space for kernel");

        /*
         * install!
         */

        kernel_map = &kernel_map_store;
}
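
/*
 * illustrative sketch (assumed machine-dependent usage, not part of this
 * file): a port's startup code would call uvm_km_init() once the kernel
 * image and bootstrap data have been carved out of KVM, e.g.:
 *
 *	uvm_km_init(kernel_virtual_start, kernel_virtual_end);
 *
 * where "kernel_virtual_start" is the first free kernel virtual address
 * (everything from VM_MIN_KERNEL_ADDRESS up to it is treated as already
 * allocated) and "kernel_virtual_end" bounds the managed range.  both
 * variable names are hypothetical.
 */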

/*
 * uvm_km_suballoc: allocate a submap in the kernel map.  once a submap
 * is allocated all references to that area of VM must go through it.  this
 * allows the locking of VAs in kernel_map to be broken up into regions.
 *
 * => if `fixed' is true, *min specifies where the region described
 *	by the submap must start
 * => if submap is non NULL we use that as the submap, otherwise we
 *	alloc a new map
 */
struct vm_map *
uvm_km_suballoc(map, min, max, size, flags, fixed, submap)
        struct vm_map *map;
        vaddr_t *min, *max;		/* OUT, OUT */
        vsize_t size;
        int flags;
        boolean_t fixed;
        struct vm_map *submap;
{
        int mapflags = UVM_FLAG_NOMERGE | (fixed ? UVM_FLAG_FIXED : 0);

        size = round_page(size);	/* round up to pagesize */

        /*
         * first allocate a blank spot in the parent map
         */

        if (uvm_map(map, min, size, NULL, UVM_UNKNOWN_OFFSET,
            UVM_MAPFLAG(UVM_PROT_ALL, UVM_PROT_ALL, UVM_INH_NONE,
            UVM_ADV_RANDOM, mapflags)) != KERN_SUCCESS) {
                panic("uvm_km_suballoc: unable to allocate space in parent map");
        }

        /*
         * set VM bounds (min is filled in by uvm_map)
         */

        *max = *min + size;

        /*
         * add references to pmap and create or init the submap
         */

        pmap_reference(vm_map_pmap(map));
        if (submap == NULL) {
                submap = uvm_map_create(vm_map_pmap(map), *min, *max, flags);
                if (submap == NULL)
                        panic("uvm_km_suballoc: unable to create submap");
        } else {
                uvm_map_setup(submap, *min, *max, flags);
                submap->pmap = vm_map_pmap(map);
        }

        /*
         * now let uvm_map_submap plug it in...
         */

        if (uvm_map_submap(map, *min, *max, submap) != KERN_SUCCESS)
                panic("uvm_km_suballoc: submap allocation failed");

        return(submap);
}
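
/*
 * illustrative sketch (hypothetical boot-time usage, not part of this
 * file): kmem_map is typically carved out of kernel_map with something
 * like the following; "kmembase", "kmemlimit", "nkmempages" and
 * "kmem_map_store" are assumed names for this example:
 *
 *	kmem_map = uvm_km_suballoc(kernel_map, &kmembase, &kmemlimit,
 *	    (vsize_t)(nkmempages << PAGE_SHIFT), VM_MAP_INTRSAFE,
 *	    FALSE, &kmem_map_store);
 *
 * afterwards all allocation in [kmembase, kmemlimit) goes through
 * kmem_map (and its lock) rather than kernel_map.
 */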

/*
 * uvm_km_pgremove: remove pages from a kernel uvm_object.
 *
 * => when you unmap a part of anonymous kernel memory you want to toss
 *    the pages right away.  (this gets called from uvm_unmap_...).
 */

#define UKM_HASH_PENALTY 4      /* a guess */

void
uvm_km_pgremove(uobj, start, end)
        struct uvm_object *uobj;
        vaddr_t start, end;
{
        boolean_t by_list;
        struct vm_page *pp, *ppnext;
        vaddr_t curoff;
        UVMHIST_FUNC("uvm_km_pgremove"); UVMHIST_CALLED(maphist);

        simple_lock(&uobj->vmobjlock);		/* lock object */

#ifdef DIAGNOSTIC
        if (uobj->pgops != &aobj_pager)
                panic("uvm_km_pgremove: object %p not an aobj", uobj);
#endif

        /* choose cheapest traversal */
        by_list = (uobj->uo_npages <=
            ((end - start) >> PAGE_SHIFT) * UKM_HASH_PENALTY);

        if (by_list)
                goto loop_by_list;

        /* by hash */

        for (curoff = start ; curoff < end ; curoff += PAGE_SIZE) {
                pp = uvm_pagelookup(uobj, curoff);
                if (pp == NULL)
                        continue;

                UVMHIST_LOG(maphist,"  page 0x%x, busy=%d", pp,
                    pp->flags & PG_BUSY, 0, 0);

                /* now do the actual work */
                if (pp->flags & PG_BUSY) {
                        /* owner must check for this when done */
                        pp->flags |= PG_RELEASED;
                } else {
                        /* free the swap slot... */
                        uao_dropswap(uobj, curoff >> PAGE_SHIFT);

                        /*
                         * ...and free the page; note it may be on the
                         * active or inactive queues.
                         */
                        uvm_lock_pageq();
                        uvm_pagefree(pp);
                        uvm_unlock_pageq();
                }
                /* done */
        }
        simple_unlock(&uobj->vmobjlock);
        return;

loop_by_list:

        for (pp = uobj->memq.tqh_first ; pp != NULL ; pp = ppnext) {
                ppnext = pp->listq.tqe_next;
                if (pp->offset < start || pp->offset >= end) {
                        continue;
                }

                UVMHIST_LOG(maphist,"  page 0x%x, busy=%d", pp,
                    pp->flags & PG_BUSY, 0, 0);

                /* now do the actual work */
                if (pp->flags & PG_BUSY) {
                        /* owner must check for this when done */
                        pp->flags |= PG_RELEASED;
                } else {
                        /* free the swap slot... */
                        uao_dropswap(uobj, pp->offset >> PAGE_SHIFT);

                        /*
                         * ...and free the page; note it may be on the
                         * active or inactive queues.
                         */
                        uvm_lock_pageq();
                        uvm_pagefree(pp);
                        uvm_unlock_pageq();
                }
                /* done */
        }
        simple_unlock(&uobj->vmobjlock);
        return;
}


/*
 * uvm_km_pgremove_intrsafe: like uvm_km_pgremove(), but for "intrsafe"
 * objects
 *
 * => when you unmap a part of anonymous kernel memory you want to toss
 *    the pages right away.  (this gets called from uvm_unmap_...).
 * => none of the pages will ever be busy, and none of them will ever
 *    be on the active or inactive queues (because these objects are
 *    never allowed to "page").
 */

void
uvm_km_pgremove_intrsafe(uobj, start, end)
        struct uvm_object *uobj;
        vaddr_t start, end;
{
        boolean_t by_list;
        struct vm_page *pp, *ppnext;
        vaddr_t curoff;
        UVMHIST_FUNC("uvm_km_pgremove_intrsafe"); UVMHIST_CALLED(maphist);

        simple_lock(&uobj->vmobjlock);		/* lock object */

#ifdef DIAGNOSTIC
        if (UVM_OBJ_IS_INTRSAFE_OBJECT(uobj) == 0)
                panic("uvm_km_pgremove_intrsafe: object %p not intrsafe", uobj);
#endif

        /* choose cheapest traversal */
        by_list = (uobj->uo_npages <=
            ((end - start) >> PAGE_SHIFT) * UKM_HASH_PENALTY);

        if (by_list)
                goto loop_by_list;

        /* by hash */

        for (curoff = start ; curoff < end ; curoff += PAGE_SIZE) {
                pp = uvm_pagelookup(uobj, curoff);
                if (pp == NULL)
                        continue;

                UVMHIST_LOG(maphist,"  page 0x%x, busy=%d", pp,
                    pp->flags & PG_BUSY, 0, 0);
#ifdef DIAGNOSTIC
                if (pp->flags & PG_BUSY)
                        panic("uvm_km_pgremove_intrsafe: busy page");
                if (pp->pqflags & PQ_ACTIVE)
                        panic("uvm_km_pgremove_intrsafe: active page");
                if (pp->pqflags & PQ_INACTIVE)
                        panic("uvm_km_pgremove_intrsafe: inactive page");
#endif

                /* free the page */
                uvm_pagefree(pp);
        }
        simple_unlock(&uobj->vmobjlock);
        return;

loop_by_list:

        for (pp = uobj->memq.tqh_first ; pp != NULL ; pp = ppnext) {
                ppnext = pp->listq.tqe_next;
                if (pp->offset < start || pp->offset >= end) {
                        continue;
                }

                UVMHIST_LOG(maphist,"  page 0x%x, busy=%d", pp,
                    pp->flags & PG_BUSY, 0, 0);

#ifdef DIAGNOSTIC
                if (pp->flags & PG_BUSY)
                        panic("uvm_km_pgremove_intrsafe: busy page");
                if (pp->pqflags & PQ_ACTIVE)
                        panic("uvm_km_pgremove_intrsafe: active page");
                if (pp->pqflags & PQ_INACTIVE)
                        panic("uvm_km_pgremove_intrsafe: inactive page");
#endif

                /* free the page */
                uvm_pagefree(pp);
        }
        simple_unlock(&uobj->vmobjlock);
        return;
}
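
/*
 * a worked example of the UKM_HASH_PENALTY heuristic used above
 * (illustrative only): removing a 4-page range allows a budget of
 * 4 * UKM_HASH_PENALTY == 16 hash lookups.  if the object holds 16 or
 * fewer pages it is assumed cheaper to walk the whole memq list once
 * than to do those 16 uvm_pagelookup() calls, so by_list is chosen;
 * for a large, densely populated object the hash walk wins instead.
 */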

/*
 * uvm_km_kmemalloc: lower level kernel memory allocator for malloc()
 *
 * => we map wired memory into the specified map using the obj passed in
 * => NOTE: we can return 0 even if we can wait if there is not enough
 *	free VM space in the map... caller should be prepared to handle
 *	this case.
 * => we return KVA of memory allocated
 * => flags: NOWAIT, VALLOC - just allocate VA, TRYLOCK - fail if we can't
 *	lock the map
 */

vaddr_t
uvm_km_kmemalloc(map, obj, size, flags)
        vm_map_t map;
        struct uvm_object *obj;
        vsize_t size;
        int flags;
{
        vaddr_t kva, loopva;
        vaddr_t offset;
        struct vm_page *pg;
        UVMHIST_FUNC("uvm_km_kmemalloc"); UVMHIST_CALLED(maphist);

        UVMHIST_LOG(maphist,"  (map=0x%x, obj=0x%x, size=0x%x, flags=%d)",
            map, obj, size, flags);
#ifdef DIAGNOSTIC
        /* sanity check */
        if (vm_map_pmap(map) != pmap_kernel())
                panic("uvm_km_kmemalloc: invalid map");
#endif

        /*
         * setup for call
         */

        size = round_page(size);
        kva = vm_map_min(map);	/* hint */

        /*
         * allocate some virtual space
         */

        if (uvm_map(map, &kva, size, obj, UVM_UNKNOWN_OFFSET,
            UVM_MAPFLAG(UVM_PROT_ALL, UVM_PROT_ALL, UVM_INH_NONE,
            UVM_ADV_RANDOM, (flags & UVM_KMF_TRYLOCK)))
            != KERN_SUCCESS) {
                UVMHIST_LOG(maphist, "<- done (no VM)",0,0,0,0);
                return(0);
        }

        /*
         * if all we wanted was VA, return now
         */

        if (flags & UVM_KMF_VALLOC) {
                UVMHIST_LOG(maphist,"<- done valloc (kva=0x%x)", kva,0,0,0);
                return(kva);
        }

        /*
         * recover object offset from virtual address
         */

        offset = kva - vm_map_min(kernel_map);
        UVMHIST_LOG(maphist, "  kva=0x%x, offset=0x%x", kva, offset,0,0);

        /*
         * now allocate and map in the memory... note that we are the only
         * ones who should ever get a handle on this area of VM.
         */

        loopva = kva;
        while (size) {
                simple_lock(&obj->vmobjlock);
                pg = uvm_pagealloc(obj, offset, NULL, 0);
                if (pg) {
                        pg->flags &= ~PG_BUSY;	/* new page */
                        UVM_PAGE_OWN(pg, NULL);
                }
                simple_unlock(&obj->vmobjlock);

                /*
                 * out of memory?
                 */

                if (pg == NULL) {
                        if (flags & UVM_KMF_NOWAIT) {
                                /* free everything! */
                                uvm_unmap(map, kva, kva + size);
                                return(0);
                        } else {
                                uvm_wait("km_getwait2");	/* sleep here */
                                continue;
                        }
                }

                /*
                 * map it in: note that we call pmap_enter with the map and
                 * object unlocked in case we are kmem_map/kmem_object
                 * (because if pmap_enter wants to allocate out of kmem_object
                 * it will need to lock it itself!)
                 */
                if (UVM_OBJ_IS_INTRSAFE_OBJECT(obj)) {
                        pmap_kenter_pa(loopva, VM_PAGE_TO_PHYS(pg),
                            VM_PROT_ALL);
                } else {
                        pmap_enter(map->pmap, loopva, VM_PAGE_TO_PHYS(pg),
                            UVM_PROT_ALL, TRUE, VM_PROT_READ|VM_PROT_WRITE);
                }
                loopva += PAGE_SIZE;
                offset += PAGE_SIZE;
                size -= PAGE_SIZE;
        }

        UVMHIST_LOG(maphist,"<- done (kva=0x%x)", kva,0,0,0);
        return(kva);
}
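
/*
 * illustrative sketch (assumed caller, not part of this file): the
 * kernel malloc() obtains wired pages for an "npg"-page chunk roughly
 * like this; "npg" is a hypothetical variable for this example:
 *
 *	vaddr_t va;
 *
 *	va = uvm_km_kmemalloc(kmem_map, uvmexp.kmem_object,
 *	    (vsize_t)ctob(npg), UVM_KMF_NOWAIT);
 *	if (va == 0)
 *		return (NULL);		// no memory, caller must cope
 *
 * note that the return value must be checked even for waiting callers,
 * since a shortage of VA (as opposed to physical pages) still returns 0.
 */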

/*
 * uvm_km_free: free an area of kernel memory
 */

void
uvm_km_free(map, addr, size)
        vm_map_t map;
        vaddr_t addr;
        vsize_t size;
{

        uvm_unmap(map, trunc_page(addr), round_page(addr+size));
}

/*
 * uvm_km_free_wakeup: free an area of kernel memory and wake up
 * anyone waiting for vm space.
 *
 * => XXX: "wanted" bit + unlock&wait on other end?
 */

void
uvm_km_free_wakeup(map, addr, size)
        vm_map_t map;
        vaddr_t addr;
        vsize_t size;
{
        vm_map_entry_t dead_entries;

        vm_map_lock(map);
        (void)uvm_unmap_remove(map, trunc_page(addr), round_page(addr+size),
            &dead_entries);
        wakeup(map);
        vm_map_unlock(map);

        if (dead_entries != NULL)
                uvm_unmap_detach(dead_entries, 0);
}

/*
 * uvm_km_alloc1: allocate wired down memory in the kernel map.
 *
 * => we can sleep if needed
 */

vaddr_t
uvm_km_alloc1(map, size, zeroit)
        vm_map_t map;
        vsize_t size;
        boolean_t zeroit;
{
        vaddr_t kva, loopva, offset;
        struct vm_page *pg;
        UVMHIST_FUNC("uvm_km_alloc1"); UVMHIST_CALLED(maphist);

        UVMHIST_LOG(maphist,"(map=0x%x, size=0x%x)", map, size,0,0);

#ifdef DIAGNOSTIC
        if (vm_map_pmap(map) != pmap_kernel())
                panic("uvm_km_alloc1");
#endif

        size = round_page(size);
        kva = vm_map_min(map);		/* hint */

        /*
         * allocate some virtual space
         */

        if (uvm_map(map, &kva, size, uvm.kernel_object, UVM_UNKNOWN_OFFSET,
            UVM_MAPFLAG(UVM_PROT_ALL, UVM_PROT_ALL, UVM_INH_NONE,
            UVM_ADV_RANDOM, 0)) != KERN_SUCCESS) {
                UVMHIST_LOG(maphist,"<- done (no VM)",0,0,0,0);
                return(0);
        }

        /*
         * recover object offset from virtual address
         */

        offset = kva - vm_map_min(kernel_map);
        UVMHIST_LOG(maphist,"  kva=0x%x, offset=0x%x", kva, offset,0,0);

        /*
         * now allocate the memory.  we must be careful about released pages.
         */

        loopva = kva;
        while (size) {
                simple_lock(&uvm.kernel_object->vmobjlock);
                pg = uvm_pagelookup(uvm.kernel_object, offset);

                /*
                 * if we found a page in an unallocated region, it must be
                 * released
                 */
                if (pg) {
                        if ((pg->flags & PG_RELEASED) == 0)
                                panic("uvm_km_alloc1: non-released page");
                        pg->flags |= PG_WANTED;
                        UVM_UNLOCK_AND_WAIT(pg, &uvm.kernel_object->vmobjlock,
                            FALSE, "km_alloc", 0);
                        continue;   /* retry */
                }

                /* allocate ram */
                pg = uvm_pagealloc(uvm.kernel_object, offset, NULL, 0);
                if (pg) {
                        pg->flags &= ~PG_BUSY;	/* new page */
                        UVM_PAGE_OWN(pg, NULL);
                }
                simple_unlock(&uvm.kernel_object->vmobjlock);
                if (pg == NULL) {
                        uvm_wait("km_alloc1w");	/* wait for memory */
                        continue;
                }

                /*
                 * map it in; note we're never called with an intrsafe
                 * object, so we always use regular old pmap_enter().
                 */
                pmap_enter(map->pmap, loopva, VM_PAGE_TO_PHYS(pg),
                    UVM_PROT_ALL, TRUE, VM_PROT_READ|VM_PROT_WRITE);

                loopva += PAGE_SIZE;
                offset += PAGE_SIZE;
                size -= PAGE_SIZE;
        }

        /*
         * zero on request (note that "size" is now zero due to the above loop
         * so we need to subtract kva from loopva to reconstruct the size).
         */

        if (zeroit)
                memset((caddr_t)kva, 0, loopva - kva);

        UVMHIST_LOG(maphist,"<- done (kva=0x%x)", kva,0,0,0);
        return(kva);
}
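
/*
 * illustrative sketch (not part of this file): a simple allocate/use/free
 * cycle of wired, zeroed kernel memory using the functions above; "len"
 * is a hypothetical size for this example:
 *
 *	vaddr_t va;
 *	vsize_t len = 3 * PAGE_SIZE;
 *
 *	va = uvm_km_alloc1(kernel_map, len, TRUE);  // wired + zeroed
 *	if (va == 0)
 *		...out of VA, handle the error...
 *	...use the memory at va...
 *	uvm_km_free(kernel_map, va, len);  // unmap; the backing pages are
 *					   // freed via uvm_km_pgremove()
 */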

/*
 * uvm_km_valloc: allocate zero-fill memory in the kernel's address space
 *
 * => memory is not allocated until fault time
 */

vaddr_t
uvm_km_valloc(map, size)
        vm_map_t map;
        vsize_t size;
{
        vaddr_t kva;
        UVMHIST_FUNC("uvm_km_valloc"); UVMHIST_CALLED(maphist);

        UVMHIST_LOG(maphist, "(map=0x%x, size=0x%x)", map, size, 0,0);

#ifdef DIAGNOSTIC
        if (vm_map_pmap(map) != pmap_kernel())
                panic("uvm_km_valloc");
#endif

        size = round_page(size);
        kva = vm_map_min(map);		/* hint */

        /*
         * allocate some virtual space.  will be demand filled by
         * kernel_object.
         */

        if (uvm_map(map, &kva, size, uvm.kernel_object, UVM_UNKNOWN_OFFSET,
            UVM_MAPFLAG(UVM_PROT_ALL, UVM_PROT_ALL, UVM_INH_NONE,
            UVM_ADV_RANDOM, 0)) != KERN_SUCCESS) {
                UVMHIST_LOG(maphist, "<- done (no VM)", 0,0,0,0);
                return(0);
        }

        UVMHIST_LOG(maphist, "<- done (kva=0x%x)", kva,0,0,0);
        return(kva);
}

/*
 * uvm_km_valloc_wait: allocate zero-fill memory in the kernel's address space
 *
 * => memory is not allocated until fault time
 * => if no room in map, wait for space to free, unless requested size
 *    is larger than map (in which case we return 0)
 */

vaddr_t
uvm_km_valloc_wait(map, size)
        vm_map_t map;
        vsize_t size;
{
        vaddr_t kva;
        UVMHIST_FUNC("uvm_km_valloc_wait"); UVMHIST_CALLED(maphist);

        UVMHIST_LOG(maphist, "(map=0x%x, size=0x%x)", map, size, 0,0);

#ifdef DIAGNOSTIC
        if (vm_map_pmap(map) != pmap_kernel())
                panic("uvm_km_valloc_wait");
#endif

        size = round_page(size);
        if (size > vm_map_max(map) - vm_map_min(map))
                return(0);

        while (1) {
                kva = vm_map_min(map);		/* hint */

                /*
                 * allocate some virtual space.  will be demand filled
                 * by kernel_object.
                 */

                if (uvm_map(map, &kva, size, uvm.kernel_object,
                    UVM_UNKNOWN_OFFSET, UVM_MAPFLAG(UVM_PROT_ALL,
                    UVM_PROT_ALL, UVM_INH_NONE, UVM_ADV_RANDOM, 0))
                    == KERN_SUCCESS) {
                        UVMHIST_LOG(maphist,"<- done (kva=0x%x)", kva,0,0,0);
                        return(kva);
                }

                /*
                 * failed.  sleep for a while (on map)
                 */

                UVMHIST_LOG(maphist,"<<<sleeping>>>",0,0,0,0);
                tsleep((caddr_t)map, PVM, "vallocwait", 0);
        }
        /*NOTREACHED*/
}
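
/*
 * illustrative sketch (not part of this file): pageable, zero-fill
 * kernel memory is useful for data touched rarely; pages appear only on
 * first access, via faults on kernel_object:
 *
 *	vaddr_t va;
 *
 *	va = uvm_km_valloc_wait(kernel_map, 16 * PAGE_SIZE);
 *	((char *)va)[0] = 1;	// first touch faults in one zeroed page
 *
 * the uvm_km_valloc_wait() variant sleeps on the map (woken by
 * uvm_km_free_wakeup()) until space is available, so it returns 0 only
 * when the request exceeds the size of the map itself.
 */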

/* Sanity; must specify both or none. */
#if (defined(PMAP_MAP_POOLPAGE) || defined(PMAP_UNMAP_POOLPAGE)) && \
    (!defined(PMAP_MAP_POOLPAGE) || !defined(PMAP_UNMAP_POOLPAGE))
#error Must specify MAP and UNMAP together.
#endif

/*
 * uvm_km_alloc_poolpage: allocate a page for the pool allocator
 *
 * => if the pmap specifies an alternate mapping method, we use it.
 */

/* ARGSUSED */
vaddr_t
uvm_km_alloc_poolpage1(map, obj, waitok)
        vm_map_t map;
        struct uvm_object *obj;
        boolean_t waitok;
{
#if defined(PMAP_MAP_POOLPAGE)
        struct vm_page *pg;
        vaddr_t va;

 again:
        pg = uvm_pagealloc(NULL, 0, NULL, UVM_PGA_USERESERVE);
        if (pg == NULL) {
                if (waitok) {
                        uvm_wait("plpg");
                        goto again;
                } else
                        return (0);
        }
        va = PMAP_MAP_POOLPAGE(VM_PAGE_TO_PHYS(pg));
        if (va == 0)
                uvm_pagefree(pg);
        return (va);
#else
        vaddr_t va;
        int s;

        /*
         * NOTE: We may be called with a map that doesn't require splimp
         * protection (e.g. kernel_map).  However, it does not hurt to
         * go to splimp in this case (since unprotected maps will never be
         * accessed in interrupt context).
         *
         * XXX We may want to consider changing the interface to this
         * XXX function.
         */

        s = splimp();
        va = uvm_km_kmemalloc(map, obj, PAGE_SIZE, waitok ? 0 : UVM_KMF_NOWAIT);
        splx(s);
        return (va);
#endif /* PMAP_MAP_POOLPAGE */
}

/*
 * uvm_km_free_poolpage: free a previously allocated pool page
 *
 * => if the pmap specifies an alternate unmapping method, we use it.
 */

/* ARGSUSED */
void
uvm_km_free_poolpage1(map, addr)
        vm_map_t map;
        vaddr_t addr;
{
#if defined(PMAP_UNMAP_POOLPAGE)
        paddr_t pa;

        pa = PMAP_UNMAP_POOLPAGE(addr);
        uvm_pagefree(PHYS_TO_VM_PAGE(pa));
#else
        int s;

        /*
         * NOTE: We may be called with a map that doesn't require splimp
         * protection (e.g. kernel_map).  However, it does not hurt to
         * go to splimp in this case (since unprotected maps will never be
         * accessed in interrupt context).
         *
         * XXX We may want to consider changing the interface to this
         * XXX function.
         */

        s = splimp();
        uvm_km_free(map, addr, PAGE_SIZE);
        splx(s);
#endif /* PMAP_UNMAP_POOLPAGE */
}
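
/*
 * illustrative sketch (assumed caller, not part of this file): a pool
 * allocator page back-end would pair the two functions above roughly as
 * follows; "waitok" is a hypothetical flag for this example:
 *
 *	vaddr_t va;
 *
 *	va = uvm_km_alloc_poolpage1(kmem_map, uvmexp.kmem_object, waitok);
 *	...carve the page at va into pool items...
 *	uvm_km_free_poolpage1(kmem_map, va);
 *
 * on pmaps that define PMAP_MAP_POOLPAGE (e.g. via a direct-mapped
 * segment), no kernel_map/kmem_map virtual space is consumed at all.
 */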