uvm_km.c revision 1.26
/*	$NetBSD: uvm_km.c,v 1.26 1999/05/26 19:27:49 thorpej Exp $	*/

/*
 * Copyright (c) 1997 Charles D. Cranor and Washington University.
 * Copyright (c) 1991, 1993, The Regents of the University of California.
 *
 * All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * The Mach Operating System project at Carnegie-Mellon University.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by Charles D. Cranor,
 *      Washington University, the University of California, Berkeley and
 *      its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)vm_kern.c   8.3 (Berkeley) 1/12/94
 * from: Id: uvm_km.c,v 1.1.2.14 1998/02/06 05:19:27 chs Exp
 *
 *
 * Copyright (c) 1987, 1990 Carnegie-Mellon University.
 * All rights reserved.
 *
 * Permission to use, copy, modify and distribute this software and
 * its documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND
 * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie the
 * rights to redistribute these changes.
 */

#include "opt_uvmhist.h"
#include "opt_pmap_new.h"

/*
 * uvm_km.c: handle kernel memory allocation and management
 */

/*
 * overview of kernel memory management:
 *
 * the kernel virtual address space is mapped by "kernel_map."   kernel_map
 * starts at VM_MIN_KERNEL_ADDRESS and goes to VM_MAX_KERNEL_ADDRESS.
 * note that VM_MIN_KERNEL_ADDRESS is equal to vm_map_min(kernel_map).
 *
 * the kernel_map has several "submaps."   submaps can only appear in
 * the kernel_map (user processes can't use them).   submaps "take over"
 * the management of a sub-range of the kernel's address space.  submaps
 * are typically allocated at boot time and are never released.   kernel
 * virtual address space that is mapped by a submap is locked by the
 * submap's lock -- not the kernel_map's lock.
 *
 * thus, the useful feature of submaps is that they allow us to break
 * up the locking and protection of the kernel address space into smaller
 * chunks.
 *
 * the vm system has several standard kernel submaps, including:
 *   kmem_map => contains only wired kernel memory for the kernel
 *		malloc.   *** access to kmem_map must be protected
 *		by splimp() because we are allowed to call malloc()
 *		at interrupt time ***
 *   mb_map => memory for large mbufs,  *** protected by splimp ***
 *   pager_map => used to map "buf" structures into kernel space
 *   exec_map => used during exec to handle exec args
 *   etc...
 *
 * the kernel allocates its private memory out of special uvm_objects whose
 * reference count is set to UVM_OBJ_KERN (thus indicating that the objects
 * are "special" and never die).   all kernel objects should be thought of
 * as large, fixed-sized, sparsely populated uvm_objects.   each kernel
 * object is equal to the size of kernel virtual address space (i.e. the
 * value "VM_MAX_KERNEL_ADDRESS - VM_MIN_KERNEL_ADDRESS").
 *
 * most kernel private memory lives in kernel_object.   the only exception
 * to this is for memory that belongs to submaps that must be protected
 * by splimp().   each of these submaps has its own private kernel
 * object (e.g. kmem_object, mb_object).
 *
 * note that just because a kernel object spans the entire kernel virtual
 * address space doesn't mean that it has to be mapped into the entire space.
 * large chunks of a kernel object's space go unused either because
 * that area of kernel VM is unmapped, or there is some other type of
 * object mapped into that range (e.g. a vnode).    for submaps' kernel
 * objects, the only part of the object that can ever be populated is the
 * offsets that are managed by the submap.
 *
 * note that the "offset" in a kernel object is always the kernel virtual
 * address minus the VM_MIN_KERNEL_ADDRESS (aka vm_map_min(kernel_map)).
 * example:
 *   suppose VM_MIN_KERNEL_ADDRESS is 0xf8000000 and the kernel does a
 *   uvm_km_alloc(kernel_map, PAGE_SIZE) [allocate 1 wired down page in the
 *   kernel map].    if uvm_km_alloc returns virtual address 0xf8235000,
 *   then that means that the page at offset 0x235000 in kernel_object is
 *   mapped at 0xf8235000.
 *
 * note that the offsets in kmem_object and mb_object also follow this
 * rule.   this means that the offsets for kmem_object must fall in the
 * range of [vm_map_min(kmem_map) - vm_map_min(kernel_map)] to
 * [vm_map_max(kmem_map) - vm_map_min(kernel_map)], so the offsets
 * in those objects will typically not start at zero.
 *
 * kernel objects have one other special property: when the kernel virtual
 * memory mapping them is unmapped, the backing memory in the object is
 * freed right away.   this is done with the uvm_km_pgremove() function.
 * this has to be done because there is no backing store for kernel pages
 * and no need to save them after they are no longer referenced.
 */
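/*
 * illustrative sketch (not part of the original file): the offset rule
 * described above amounts to a pair of trivial conversions between a
 * kernel virtual address and a kernel_object offset.  the helper names
 * below are hypothetical and exist only for this example.
 */
#if 0
static __inline vaddr_t
km_kva_to_offset(vaddr_t kva)
{
	return (kva - VM_MIN_KERNEL_ADDRESS);	/* kva -> kernel_object offset */
}

static __inline vaddr_t
km_offset_to_kva(vaddr_t offset)
{
	return (VM_MIN_KERNEL_ADDRESS + offset);	/* offset -> kva */
}
#endif /* example only */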

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/proc.h>

#include <vm/vm.h>
#include <vm/vm_page.h>
#include <vm/vm_kern.h>

#include <uvm/uvm.h>

/*
 * global data structures
 */

vm_map_t kernel_map = NULL;

/*
 * local functions
 */

static int uvm_km_get __P((struct uvm_object *, vaddr_t,
	vm_page_t *, int *, int, vm_prot_t, int, int));

/*
 * local data structures
 */

static struct vm_map		kernel_map_store;
static struct uvm_object	kmem_object_store;
static struct uvm_object	mb_object_store;

static struct uvm_pagerops km_pager = {
	NULL,		/* init */
	NULL,		/* reference */
	NULL,		/* detach */
	NULL,		/* fault */
	NULL,		/* flush */
	uvm_km_get,	/* get */
	/* ... rest are NULL */
};

/*
 * uvm_km_get: pager get function for kernel objects
 *
 * => currently we do not support pageout to the swap area, so this
 *    pager is very simple.    eventually we may want an anonymous
 *    object pager which will do paging.
 * => XXXCDC: this pager should be phased out in favor of the aobj pager
 */


static int
uvm_km_get(uobj, offset, pps, npagesp, centeridx, access_type, advice, flags)
	struct uvm_object *uobj;
	vaddr_t offset;
	struct vm_page **pps;
	int *npagesp;
	int centeridx, advice, flags;
	vm_prot_t access_type;
{
	vaddr_t current_offset;
	vm_page_t ptmp;
	int lcv, gotpages, maxpages;
	boolean_t done;
	UVMHIST_FUNC("uvm_km_get"); UVMHIST_CALLED(maphist);

	UVMHIST_LOG(maphist, "flags=%d", flags,0,0,0);

	/*
	 * get number of pages
	 */

	maxpages = *npagesp;

	/*
	 * step 1: handle the case where fault data structures are locked.
	 */

	if (flags & PGO_LOCKED) {

		/*
		 * step 1a: get pages that are already resident.   only do
		 * this if the data structures are locked (i.e. the first time
		 * through).
		 */

		done = TRUE;	/* be optimistic */
		gotpages = 0;	/* # of pages we got so far */

		for (lcv = 0, current_offset = offset ;
		    lcv < maxpages ; lcv++, current_offset += PAGE_SIZE) {

			/* do we care about this page?  if not, skip it */
			if (pps[lcv] == PGO_DONTCARE)
				continue;

			/* lookup page */
			ptmp = uvm_pagelookup(uobj, current_offset);

			/* null?  attempt to allocate the page */
			if (ptmp == NULL) {
				ptmp = uvm_pagealloc(uobj, current_offset,
				    NULL, 0);
				if (ptmp) {
					/* new page */
					ptmp->flags &= ~(PG_BUSY|PG_FAKE);
					UVM_PAGE_OWN(ptmp, NULL);
					uvm_pagezero(ptmp);
				}
			}

			/*
			 * to be useful must get a non-busy, non-released page
			 */
			if (ptmp == NULL ||
			    (ptmp->flags & (PG_BUSY|PG_RELEASED)) != 0) {
				if (lcv == centeridx ||
				    (flags & PGO_ALLPAGES) != 0)
					/* need to do a wait or I/O! */
					done = FALSE;
				continue;
			}

			/*
			 * useful page: busy/lock it and plug it in our
			 * result array
			 */

			/* caller must un-busy this page */
			ptmp->flags |= PG_BUSY;
			UVM_PAGE_OWN(ptmp, "uvm_km_get1");
			pps[lcv] = ptmp;
			gotpages++;

		}	/* "for" lcv loop */

		/*
		 * step 1b: now we've either done everything needed or we
		 * need to unlock and do some waiting or I/O.
		 */

		UVMHIST_LOG(maphist, "<- done (done=%d)", done, 0,0,0);

		*npagesp = gotpages;
		if (done)
			return(VM_PAGER_OK);		/* bingo! */
		else
			return(VM_PAGER_UNLOCK);	/* EEK!   Need to
							 * unlock and I/O */
	}

	/*
	 * step 2: get non-resident or busy pages.
	 * object is locked.   data structures are unlocked.
	 */

	for (lcv = 0, current_offset = offset ;
	    lcv < maxpages ; lcv++, current_offset += PAGE_SIZE) {

		/* skip over pages we've already gotten or don't want */
		/* skip over pages we don't _have_ to get */
		if (pps[lcv] != NULL ||
		    (lcv != centeridx && (flags & PGO_ALLPAGES) == 0))
			continue;

		/*
		 * we have yet to locate the current page (pps[lcv]).   we
		 * first look for a page that is already at the current offset.
		 * if we find a page, we check to see if it is busy or
		 * released.  if that is the case, then we sleep on the page
		 * until it is no longer busy or released and repeat the
		 * lookup.    if the page we found is neither busy nor
		 * released, then we busy it (so we own it) and plug it into
		 * pps[lcv].   this 'break's the following while loop and
		 * indicates we are ready to move on to the next page in the
		 * "lcv" loop above.
		 *
		 * if we exit the while loop with pps[lcv] still set to NULL,
		 * then it means that we allocated a new busy/fake/clean page
		 * ptmp in the object and we need to do I/O to fill in the
		 * data.
		 */

		while (pps[lcv] == NULL) {	/* top of "pps" while loop */

			/* look for a current page */
			ptmp = uvm_pagelookup(uobj, current_offset);

			/* nope?   allocate one now (if we can) */
			if (ptmp == NULL) {

				ptmp = uvm_pagealloc(uobj, current_offset,
				    NULL, 0);

				/* out of RAM? */
				if (ptmp == NULL) {
					simple_unlock(&uobj->vmobjlock);
					uvm_wait("kmgetwait1");
					simple_lock(&uobj->vmobjlock);
					/* goto top of pps while loop */
					continue;
				}

				/*
				 * got new page ready for I/O.  break pps
				 * while loop.  pps[lcv] is still NULL.
				 */
				break;
			}

			/* page is there, see if we need to wait on it */
			if ((ptmp->flags & (PG_BUSY|PG_RELEASED)) != 0) {
				ptmp->flags |= PG_WANTED;
				UVM_UNLOCK_AND_WAIT(ptmp,&uobj->vmobjlock, 0,
				    "uvn_get",0);
				simple_lock(&uobj->vmobjlock);
				continue;	/* goto top of pps while loop */
			}

			/*
			 * if we get here then the page has become resident
			 * and unbusy between steps 1 and 2.  we busy it now
			 * (so we own it) and set pps[lcv] (so that we exit
			 * the while loop).  caller must un-busy.
			 */
			ptmp->flags |= PG_BUSY;
			UVM_PAGE_OWN(ptmp, "uvm_km_get2");
			pps[lcv] = ptmp;
		}

		/*
		 * if we own a valid page at the correct offset, pps[lcv]
		 * will point to it.   nothing more to do except go to the
		 * next page.
		 */

		if (pps[lcv])
			continue;			/* next lcv */

		/*
		 * we have a "fake/busy/clean" page that we just allocated.
		 * do the needed "i/o" (in this case that means zero it).
		 */

		uvm_pagezero(ptmp);
		ptmp->flags &= ~(PG_FAKE);
		pps[lcv] = ptmp;

	}	/* lcv loop */

	/*
	 * finally, unlock object and return.
	 */

	simple_unlock(&uobj->vmobjlock);
	UVMHIST_LOG(maphist, "<- done (OK)",0,0,0,0);
	return(VM_PAGER_OK);
}

/*
 * uvm_km_init: init kernel maps and objects to reflect reality (i.e.
 * KVM already allocated for text, data, bss, and static data structures).
 *
 * => KVM is defined by VM_MIN_KERNEL_ADDRESS/VM_MAX_KERNEL_ADDRESS.
 *    we assume that [min -> start] has already been allocated and that
 *    "end" is the end.
 */

void
uvm_km_init(start, end)
	vaddr_t start, end;
{
	vaddr_t base = VM_MIN_KERNEL_ADDRESS;

	/*
	 * first, init kernel memory objects.
	 */

	/* kernel_object: for pageable anonymous kernel memory */
	uvm.kernel_object = uao_create(VM_MAX_KERNEL_ADDRESS -
	    VM_MIN_KERNEL_ADDRESS, UAO_FLAG_KERNOBJ);

	/*
	 * kmem_object: for use by the kernel malloc().  Memory is always
	 * wired, and this object (and the kmem_map) can be accessed at
	 * interrupt time.
	 */
	simple_lock_init(&kmem_object_store.vmobjlock);
	kmem_object_store.pgops = &km_pager;
	TAILQ_INIT(&kmem_object_store.memq);
	kmem_object_store.uo_npages = 0;
	/* we are special.  we never die */
	kmem_object_store.uo_refs = UVM_OBJ_KERN_INTRSAFE;
	uvmexp.kmem_object = &kmem_object_store;

	/*
	 * mb_object: for mbuf cluster pages on platforms which use the
	 * mb_map.  Memory is always wired, and this object (and the mb_map)
	 * can be accessed at interrupt time.
	 */
	simple_lock_init(&mb_object_store.vmobjlock);
	mb_object_store.pgops = &km_pager;
	TAILQ_INIT(&mb_object_store.memq);
	mb_object_store.uo_npages = 0;
	/* we are special.  we never die */
	mb_object_store.uo_refs = UVM_OBJ_KERN_INTRSAFE;
	uvmexp.mb_object = &mb_object_store;

	/*
	 * init the map and reserve already allocated kernel space
	 * before installing.
	 */

	uvm_map_setup(&kernel_map_store, base, end, VM_MAP_PAGEABLE);
	kernel_map_store.pmap = pmap_kernel();
	if (uvm_map(&kernel_map_store, &base, start - base, NULL,
	    UVM_UNKNOWN_OFFSET, UVM_MAPFLAG(UVM_PROT_ALL, UVM_PROT_ALL,
	    UVM_INH_NONE, UVM_ADV_RANDOM, UVM_FLAG_FIXED)) != KERN_SUCCESS)
		panic("uvm_km_init: could not reserve space for kernel");

	/*
	 * install!
	 */

	kernel_map = &kernel_map_store;
}

/*
 * uvm_km_suballoc: allocate a submap in the kernel map.   once a submap
 * is allocated all references to that area of VM must go through it.  this
 * allows the locking of VAs in kernel_map to be broken up into regions.
 *
 * => if `fixed' is true, *min specifies where the region described
 *	by the submap must start
 * => if submap is non NULL we use that as the submap, otherwise we
 *	alloc a new map
 */
struct vm_map *
uvm_km_suballoc(map, min, max, size, flags, fixed, submap)
	struct vm_map *map;
	vaddr_t *min, *max;		/* OUT, OUT */
	vsize_t size;
	int flags;
	boolean_t fixed;
	struct vm_map *submap;
{
	int mapflags = UVM_FLAG_NOMERGE | (fixed ? UVM_FLAG_FIXED : 0);

	size = round_page(size);	/* round up to pagesize */

	/*
	 * first allocate a blank spot in the parent map
	 */

	if (uvm_map(map, min, size, NULL, UVM_UNKNOWN_OFFSET,
	    UVM_MAPFLAG(UVM_PROT_ALL, UVM_PROT_ALL, UVM_INH_NONE,
	    UVM_ADV_RANDOM, mapflags)) != KERN_SUCCESS) {
		panic("uvm_km_suballoc: unable to allocate space in parent map");
	}

	/*
	 * set VM bounds (min is filled in by uvm_map)
	 */

	*max = *min + size;

	/*
	 * add references to pmap and create or init the submap
	 */

	pmap_reference(vm_map_pmap(map));
	if (submap == NULL) {
		submap = uvm_map_create(vm_map_pmap(map), *min, *max, flags);
		if (submap == NULL)
			panic("uvm_km_suballoc: unable to create submap");
	} else {
		uvm_map_setup(submap, *min, *max, flags);
		submap->pmap = vm_map_pmap(map);
	}

	/*
	 * now let uvm_map_submap plug it in...
	 */

	if (uvm_map_submap(map, *min, *max, submap) != KERN_SUCCESS)
		panic("uvm_km_suballoc: submap allocation failed");

	return(submap);
}
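/*
 * illustrative sketch (not part of the original file): boot-time code
 * typically carves an interrupt-safe submap such as kmem_map out of
 * kernel_map roughly as below.  the variable names, the size, and the
 * map flags here are hypothetical approximations of what the kernel
 * malloc startup code does; see the real callers for details.
 */
#if 0
	static struct vm_map kmem_map_store;	/* static storage for the submap */
	vaddr_t kmembase, kmemlimit;
	vsize_t kmem_size = 128 * PAGE_SIZE;	/* hypothetical size */

	kmem_map = uvm_km_suballoc(kernel_map, &kmembase, &kmemlimit,
	    kmem_size, VM_MAP_INTRSAFE, FALSE, &kmem_map_store);
#endif /* example only */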
/*
 * uvm_km_pgremove: remove pages from a kernel uvm_object.
 *
 * => when you unmap a part of anonymous kernel memory you want to toss
 *    the pages right away.    (this gets called from uvm_unmap_...).
 */

#define UKM_HASH_PENALTY 4      /* a guess */

void
uvm_km_pgremove(uobj, start, end)
	struct uvm_object *uobj;
	vaddr_t start, end;
{
	boolean_t by_list;
	struct vm_page *pp, *ppnext;
	vaddr_t curoff;
	UVMHIST_FUNC("uvm_km_pgremove"); UVMHIST_CALLED(maphist);

	simple_lock(&uobj->vmobjlock);		/* lock object */

#ifdef DIAGNOSTIC
	if (uobj->pgops != &aobj_pager)
		panic("uvm_km_pgremove: object %p not an aobj", uobj);
#endif

	/* choose cheapest traversal */
	by_list = (uobj->uo_npages <=
	    ((end - start) >> PAGE_SHIFT) * UKM_HASH_PENALTY);

	if (by_list)
		goto loop_by_list;

	/* by hash */

	for (curoff = start ; curoff < end ; curoff += PAGE_SIZE) {
		pp = uvm_pagelookup(uobj, curoff);
		if (pp == NULL)
			continue;

		UVMHIST_LOG(maphist,"  page 0x%x, busy=%d", pp,
		    pp->flags & PG_BUSY, 0, 0);

		/* now do the actual work */
		if (pp->flags & PG_BUSY) {
			/* owner must check for this when done */
			pp->flags |= PG_RELEASED;
		} else {
			/* free the swap slot... */
			uao_dropswap(uobj, curoff >> PAGE_SHIFT);

			/*
			 * ...and free the page; note it may be on the
			 * active or inactive queues.
			 */
			uvm_lock_pageq();
			uvm_pagefree(pp);
			uvm_unlock_pageq();
		}
		/* done */
	}
	simple_unlock(&uobj->vmobjlock);
	return;

loop_by_list:

	for (pp = uobj->memq.tqh_first ; pp != NULL ; pp = ppnext) {
		ppnext = pp->listq.tqe_next;
		if (pp->offset < start || pp->offset >= end) {
			continue;
		}

		UVMHIST_LOG(maphist,"  page 0x%x, busy=%d", pp,
		    pp->flags & PG_BUSY, 0, 0);

		/* now do the actual work */
		if (pp->flags & PG_BUSY) {
			/* owner must check for this when done */
			pp->flags |= PG_RELEASED;
		} else {
			/* free the swap slot... */
			uao_dropswap(uobj, pp->offset >> PAGE_SHIFT);

			/*
			 * ...and free the page; note it may be on the
			 * active or inactive queues.
			 */
			uvm_lock_pageq();
			uvm_pagefree(pp);
			uvm_unlock_pageq();
		}
		/* done */
	}
	simple_unlock(&uobj->vmobjlock);
	return;
}


/*
 * uvm_km_pgremove_intrsafe: like uvm_km_pgremove(), but for "intrsafe"
 * objects
 *
 * => when you unmap a part of anonymous kernel memory you want to toss
 *    the pages right away.    (this gets called from uvm_unmap_...).
 * => none of the pages will ever be busy, and none of them will ever
 *    be on the active or inactive queues (because these objects are
 *    never allowed to "page").
 */

void
uvm_km_pgremove_intrsafe(uobj, start, end)
	struct uvm_object *uobj;
	vaddr_t start, end;
{
	boolean_t by_list;
	struct vm_page *pp, *ppnext;
	vaddr_t curoff;
	UVMHIST_FUNC("uvm_km_pgremove_intrsafe"); UVMHIST_CALLED(maphist);

	simple_lock(&uobj->vmobjlock);		/* lock object */

#ifdef DIAGNOSTIC
	if (UVM_OBJ_IS_INTRSAFE_OBJECT(uobj) == 0)
		panic("uvm_km_pgremove_intrsafe: object %p not intrsafe", uobj);
#endif

	/* choose cheapest traversal */
	by_list = (uobj->uo_npages <=
	    ((end - start) >> PAGE_SHIFT) * UKM_HASH_PENALTY);

	if (by_list)
		goto loop_by_list;

	/* by hash */

	for (curoff = start ; curoff < end ; curoff += PAGE_SIZE) {
		pp = uvm_pagelookup(uobj, curoff);
		if (pp == NULL)
			continue;

		UVMHIST_LOG(maphist,"  page 0x%x, busy=%d", pp,
		    pp->flags & PG_BUSY, 0, 0);
#ifdef DIAGNOSTIC
		if (pp->flags & PG_BUSY)
			panic("uvm_km_pgremove_intrsafe: busy page");
		if (pp->pqflags & PQ_ACTIVE)
			panic("uvm_km_pgremove_intrsafe: active page");
		if (pp->pqflags & PQ_INACTIVE)
			panic("uvm_km_pgremove_intrsafe: inactive page");
#endif

		/* free the page */
		uvm_pagefree(pp);
	}
	simple_unlock(&uobj->vmobjlock);
	return;

loop_by_list:

	for (pp = uobj->memq.tqh_first ; pp != NULL ; pp = ppnext) {
		ppnext = pp->listq.tqe_next;
		if (pp->offset < start || pp->offset >= end) {
			continue;
		}

		UVMHIST_LOG(maphist,"  page 0x%x, busy=%d", pp,
		    pp->flags & PG_BUSY, 0, 0);

#ifdef DIAGNOSTIC
		if (pp->flags & PG_BUSY)
			panic("uvm_km_pgremove_intrsafe: busy page");
		if (pp->pqflags & PQ_ACTIVE)
			panic("uvm_km_pgremove_intrsafe: active page");
		if (pp->pqflags & PQ_INACTIVE)
			panic("uvm_km_pgremove_intrsafe: inactive page");
#endif

		/* free the page */
		uvm_pagefree(pp);
	}
	simple_unlock(&uobj->vmobjlock);
	return;
}


/*
 * uvm_km_kmemalloc: lower level kernel memory allocator for malloc()
 *
 * => we map wired memory into the specified map using the obj passed in
 * => NOTE: we can return NULL even if we can wait if there is not enough
 *	free VM space in the map... caller should be prepared to handle
 *	this case.
 * => we return KVA of memory allocated
 * => flags: NOWAIT, VALLOC - just allocate VA, TRYLOCK - fail if we can't
 *	lock the map
 */

vaddr_t
uvm_km_kmemalloc(map, obj, size, flags)
	vm_map_t map;
	struct uvm_object *obj;
	vsize_t size;
	int flags;
{
	vaddr_t kva, loopva;
	vaddr_t offset;
	struct vm_page *pg;
	UVMHIST_FUNC("uvm_km_kmemalloc"); UVMHIST_CALLED(maphist);


	UVMHIST_LOG(maphist,"  (map=0x%x, obj=0x%x, size=0x%x, flags=%d)",
	    map, obj, size, flags);
#ifdef DIAGNOSTIC
	/* sanity check */
	if (vm_map_pmap(map) != pmap_kernel())
		panic("uvm_km_kmemalloc: invalid map");
#endif

	/*
	 * setup for call
	 */

	size = round_page(size);
	kva = vm_map_min(map);	/* hint */

	/*
	 * allocate some virtual space
	 */

	if (uvm_map(map, &kva, size, obj, UVM_UNKNOWN_OFFSET,
	    UVM_MAPFLAG(UVM_PROT_ALL, UVM_PROT_ALL, UVM_INH_NONE,
	    UVM_ADV_RANDOM, (flags & UVM_KMF_TRYLOCK)))
	    != KERN_SUCCESS) {
		UVMHIST_LOG(maphist, "<- done (no VM)",0,0,0,0);
		return(0);
	}

	/*
	 * if all we wanted was VA, return now
	 */

	if (flags & UVM_KMF_VALLOC) {
		UVMHIST_LOG(maphist,"<- done valloc (kva=0x%x)", kva,0,0,0);
		return(kva);
	}
	/*
	 * recover object offset from virtual address
	 */

	offset = kva - vm_map_min(kernel_map);
	UVMHIST_LOG(maphist, "  kva=0x%x, offset=0x%x", kva, offset,0,0);

	/*
	 * now allocate and map in the memory... note that we are the only ones
	 * who should ever get a handle on this area of VM.
	 */

	loopva = kva;
	while (size) {
		simple_lock(&obj->vmobjlock);
		pg = uvm_pagealloc(obj, offset, NULL, 0);
		if (pg) {
			pg->flags &= ~PG_BUSY;	/* new page */
			UVM_PAGE_OWN(pg, NULL);
		}
		simple_unlock(&obj->vmobjlock);

		/*
		 * out of memory?
		 */

		if (pg == NULL) {
			if (flags & UVM_KMF_NOWAIT) {
				/* free everything! */
				uvm_unmap(map, kva, kva + size);
				return(0);
			} else {
				uvm_wait("km_getwait2");	/* sleep here */
				continue;
			}
		}

		/*
		 * map it in: note that we call pmap_enter with the map and
		 * object unlocked in case we are kmem_map/kmem_object
		 * (because if pmap_enter wants to allocate out of kmem_object
		 * it will need to lock it itself!)
		 */
		if (UVM_OBJ_IS_INTRSAFE_OBJECT(obj)) {
#if defined(PMAP_NEW)
			pmap_kenter_pa(loopva, VM_PAGE_TO_PHYS(pg),
			    VM_PROT_ALL);
#else
			pmap_enter(map->pmap, loopva, VM_PAGE_TO_PHYS(pg),
			    UVM_PROT_ALL, TRUE, VM_PROT_READ|VM_PROT_WRITE);
#endif
		} else {
			pmap_enter(map->pmap, loopva, VM_PAGE_TO_PHYS(pg),
			    UVM_PROT_ALL, TRUE, VM_PROT_READ|VM_PROT_WRITE);
		}
		loopva += PAGE_SIZE;
		offset += PAGE_SIZE;
		size -= PAGE_SIZE;
	}

	UVMHIST_LOG(maphist,"<- done (kva=0x%x)", kva,0,0,0);
	return(kva);
}

/*
 * uvm_km_free: free an area of kernel memory
 */

void
uvm_km_free(map, addr, size)
	vm_map_t map;
	vaddr_t addr;
	vsize_t size;
{

	uvm_unmap(map, trunc_page(addr), round_page(addr+size));
}
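/*
 * illustrative sketch (not part of the original file): a typical wired
 * allocation from the interrupt-safe kmem_map and the matching free.
 * the splimp() bracketing follows the locking rule in the overview
 * comment at the top of this file; the size is hypothetical.
 */
#if 0
	vaddr_t va;
	vsize_t size = 4 * PAGE_SIZE;		/* hypothetical request */
	int s;

	s = splimp();				/* kmem_map is used at interrupt time */
	va = uvm_km_kmemalloc(kmem_map, uvmexp.kmem_object, size,
	    UVM_KMF_NOWAIT);			/* don't sleep; may return 0 */
	splx(s);

	if (va != 0) {
		/* ... use the wired memory at va ... */
		s = splimp();
		uvm_km_free(kmem_map, va, size);
		splx(s);
	}
#endif /* example only */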

/*
 * uvm_km_free_wakeup: free an area of kernel memory and wake up
 * anyone waiting for vm space.
 *
 * => XXX: "wanted" bit + unlock&wait on other end?
 */

void
uvm_km_free_wakeup(map, addr, size)
	vm_map_t map;
	vaddr_t addr;
	vsize_t size;
{
	vm_map_entry_t dead_entries;

	vm_map_lock(map);
	(void)uvm_unmap_remove(map, trunc_page(addr), round_page(addr+size),
	    &dead_entries);
	thread_wakeup(map);
	vm_map_unlock(map);

	if (dead_entries != NULL)
		uvm_unmap_detach(dead_entries, 0);
}

/*
 * uvm_km_alloc1: allocate wired down memory in the kernel map.
 *
 * => we can sleep if needed
 */

vaddr_t
uvm_km_alloc1(map, size, zeroit)
	vm_map_t map;
	vsize_t size;
	boolean_t zeroit;
{
	vaddr_t kva, loopva, offset;
	struct vm_page *pg;
	UVMHIST_FUNC("uvm_km_alloc1"); UVMHIST_CALLED(maphist);

	UVMHIST_LOG(maphist,"(map=0x%x, size=0x%x)", map, size,0,0);

#ifdef DIAGNOSTIC
	if (vm_map_pmap(map) != pmap_kernel())
		panic("uvm_km_alloc1");
#endif

	size = round_page(size);
	kva = vm_map_min(map);		/* hint */

	/*
	 * allocate some virtual space
	 */

	if (uvm_map(map, &kva, size, uvm.kernel_object, UVM_UNKNOWN_OFFSET,
	    UVM_MAPFLAG(UVM_PROT_ALL, UVM_PROT_ALL, UVM_INH_NONE,
	    UVM_ADV_RANDOM, 0)) != KERN_SUCCESS) {
		UVMHIST_LOG(maphist,"<- done (no VM)",0,0,0,0);
		return(0);
	}

	/*
	 * recover object offset from virtual address
	 */

	offset = kva - vm_map_min(kernel_map);
	UVMHIST_LOG(maphist,"  kva=0x%x, offset=0x%x", kva, offset,0,0);

	/*
	 * now allocate the memory.  we must be careful about released pages.
	 */

	loopva = kva;
	while (size) {
		simple_lock(&uvm.kernel_object->vmobjlock);
		pg = uvm_pagelookup(uvm.kernel_object, offset);

		/*
		 * if we found a page in an unallocated region, it must be
		 * released
		 */
		if (pg) {
			if ((pg->flags & PG_RELEASED) == 0)
				panic("uvm_km_alloc1: non-released page");
			pg->flags |= PG_WANTED;
			UVM_UNLOCK_AND_WAIT(pg, &uvm.kernel_object->vmobjlock,
			    0, "km_alloc", 0);
			continue;   /* retry */
		}

		/* allocate ram */
		pg = uvm_pagealloc(uvm.kernel_object, offset, NULL, 0);
		if (pg) {
			pg->flags &= ~PG_BUSY;	/* new page */
			UVM_PAGE_OWN(pg, NULL);
		}
		simple_unlock(&uvm.kernel_object->vmobjlock);
		if (pg == NULL) {
			uvm_wait("km_alloc1w");	/* wait for memory */
			continue;
		}

		/*
		 * map it in; note we're never called with an intrsafe
		 * object, so we always use regular old pmap_enter().
		 */
		pmap_enter(map->pmap, loopva, VM_PAGE_TO_PHYS(pg),
		    UVM_PROT_ALL, TRUE, VM_PROT_READ|VM_PROT_WRITE);

		loopva += PAGE_SIZE;
		offset += PAGE_SIZE;
		size -= PAGE_SIZE;
	}

	/*
	 * zero on request (note that "size" is now zero due to the above loop
	 * so we need to subtract kva from loopva to reconstruct the size).
	 */

	if (zeroit)
		memset((caddr_t)kva, 0, loopva - kva);

	UVMHIST_LOG(maphist,"<- done (kva=0x%x)", kva,0,0,0);
	return(kva);
}
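/*
 * illustrative sketch (not part of the original file): callers normally
 * reach uvm_km_alloc1() through thin uvm_km_alloc()/uvm_km_zalloc()
 * wrapper macros (defined alongside the uvm_km prototypes; treat the
 * exact header as an assumption), e.g. to get one page of wired, zeroed
 * kernel memory as in the overview comment above.
 */
#if 0
	vaddr_t kva;

	kva = uvm_km_zalloc(kernel_map, PAGE_SIZE);	/* wired + zeroed; may sleep */
	if (kva == 0)
		panic("example: out of kernel VM");
	/* ... use the page at kva ... */
	uvm_km_free(kernel_map, kva, PAGE_SIZE);
#endif /* example only */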

/*
 * uvm_km_valloc: allocate zero-fill memory in the kernel's address space
 *
 * => memory is not allocated until fault time
 */

vaddr_t
uvm_km_valloc(map, size)
	vm_map_t map;
	vsize_t size;
{
	vaddr_t kva;
	UVMHIST_FUNC("uvm_km_valloc"); UVMHIST_CALLED(maphist);

	UVMHIST_LOG(maphist, "(map=0x%x, size=0x%x)", map, size, 0,0);

#ifdef DIAGNOSTIC
	if (vm_map_pmap(map) != pmap_kernel())
		panic("uvm_km_valloc");
#endif

	size = round_page(size);
	kva = vm_map_min(map);		/* hint */

	/*
	 * allocate some virtual space.  will be demand filled by kernel_object.
	 */

	if (uvm_map(map, &kva, size, uvm.kernel_object, UVM_UNKNOWN_OFFSET,
	    UVM_MAPFLAG(UVM_PROT_ALL, UVM_PROT_ALL, UVM_INH_NONE,
	    UVM_ADV_RANDOM, 0)) != KERN_SUCCESS) {
		UVMHIST_LOG(maphist, "<- done (no VM)", 0,0,0,0);
		return(0);
	}

	UVMHIST_LOG(maphist, "<- done (kva=0x%x)", kva,0,0,0);
	return(kva);
}

/*
 * uvm_km_valloc_wait: allocate zero-fill memory in the kernel's address space
 *
 * => memory is not allocated until fault time
 * => if no room in map, wait for space to free, unless requested size
 *    is larger than map (in which case we return 0)
 */

vaddr_t
uvm_km_valloc_wait(map, size)
	vm_map_t map;
	vsize_t size;
{
	vaddr_t kva;
	UVMHIST_FUNC("uvm_km_valloc_wait"); UVMHIST_CALLED(maphist);

	UVMHIST_LOG(maphist, "(map=0x%x, size=0x%x)", map, size, 0,0);

#ifdef DIAGNOSTIC
	if (vm_map_pmap(map) != pmap_kernel())
		panic("uvm_km_valloc_wait");
#endif

	size = round_page(size);
	if (size > vm_map_max(map) - vm_map_min(map))
		return(0);

	while (1) {
		kva = vm_map_min(map);		/* hint */

		/*
		 * allocate some virtual space.   will be demand filled
		 * by kernel_object.
		 */

		if (uvm_map(map, &kva, size, uvm.kernel_object,
		    UVM_UNKNOWN_OFFSET, UVM_MAPFLAG(UVM_PROT_ALL,
		    UVM_PROT_ALL, UVM_INH_NONE, UVM_ADV_RANDOM, 0))
		    == KERN_SUCCESS) {
			UVMHIST_LOG(maphist,"<- done (kva=0x%x)", kva,0,0,0);
			return(kva);
		}

		/*
		 * failed.  sleep for a while (on map)
		 */

		UVMHIST_LOG(maphist,"<<<sleeping>>>",0,0,0,0);
		tsleep((caddr_t)map, PVM, "vallocwait", 0);
	}
	/*NOTREACHED*/
}
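/*
 * illustrative sketch (not part of the original file): pageable,
 * zero-fill kernel memory is typically obtained from a submap with
 * uvm_km_valloc_wait() and released with uvm_km_free_wakeup(), e.g.
 * buffer space for exec arguments.  this is only an approximation of
 * that pattern, not a copy of any particular caller.
 */
#if 0
	vaddr_t argp;

	argp = uvm_km_valloc_wait(exec_map, NCARGS);	/* sleeps until VA is free */
	/* ... copy in the argument strings; pages fault in on demand ... */
	uvm_km_free_wakeup(exec_map, argp, NCARGS);	/* wake other waiters */
#endif /* example only */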

/* Sanity; must specify both or none. */
#if (defined(PMAP_MAP_POOLPAGE) || defined(PMAP_UNMAP_POOLPAGE)) && \
    (!defined(PMAP_MAP_POOLPAGE) || !defined(PMAP_UNMAP_POOLPAGE))
#error Must specify MAP and UNMAP together.
#endif

/*
 * uvm_km_alloc_poolpage: allocate a page for the pool allocator
 *
 * => if the pmap specifies an alternate mapping method, we use it.
 */

/* ARGSUSED */
vaddr_t
uvm_km_alloc_poolpage1(map, obj, waitok)
	vm_map_t map;
	struct uvm_object *obj;
	boolean_t waitok;
{
#if defined(PMAP_MAP_POOLPAGE)
	struct vm_page *pg;
	vaddr_t va;

 again:
	pg = uvm_pagealloc(NULL, 0, NULL, 0);
	if (pg == NULL) {
		if (waitok) {
			uvm_wait("plpg");
			goto again;
		} else
			return (0);
	}
	va = PMAP_MAP_POOLPAGE(VM_PAGE_TO_PHYS(pg));
	if (va == 0)
		uvm_pagefree(pg);
	return (va);
#else
	vaddr_t va;
	int s;

	/*
	 * NOTE: We may be called with a map that doesn't require splimp
	 * protection (e.g. kernel_map).  However, it does not hurt to
	 * go to splimp in this case (since unprotected maps will never be
	 * accessed in interrupt context).
	 *
	 * XXX We may want to consider changing the interface to this
	 * XXX function.
	 */

	s = splimp();
	va = uvm_km_kmemalloc(map, obj, PAGE_SIZE, waitok ? 0 : UVM_KMF_NOWAIT);
	splx(s);
	return (va);
#endif /* PMAP_MAP_POOLPAGE */
}

/*
 * uvm_km_free_poolpage: free a previously allocated pool page
 *
 * => if the pmap specifies an alternate unmapping method, we use it.
 */

/* ARGSUSED */
void
uvm_km_free_poolpage1(map, addr)
	vm_map_t map;
	vaddr_t addr;
{
#if defined(PMAP_UNMAP_POOLPAGE)
	paddr_t pa;

	pa = PMAP_UNMAP_POOLPAGE(addr);
	uvm_pagefree(PHYS_TO_VM_PAGE(pa));
#else
	int s;

	/*
	 * NOTE: We may be called with a map that doesn't require splimp
	 * protection (e.g. kernel_map).  However, it does not hurt to
	 * go to splimp in this case (since unprotected maps will never be
	 * accessed in interrupt context).
	 *
	 * XXX We may want to consider changing the interface to this
	 * XXX function.
	 */

	s = splimp();
	uvm_km_free(map, addr, PAGE_SIZE);
	splx(s);
#endif /* PMAP_UNMAP_POOLPAGE */
}
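/*
 * illustrative sketch (not part of the original file): a pool back-end
 * page allocator can wrap the two functions above roughly as below, so
 * pools get single wired pages either through a direct-mapped segment
 * (PMAP_MAP_POOLPAGE) or through kmem_map.  the wrapper names and the
 * flag handling here are hypothetical.
 */
#if 0
static void *
example_pool_page_alloc(unsigned long sz, int flags, int mtype)
{
	boolean_t waitok = (flags & PR_WAITOK) ? TRUE : FALSE;

	return ((void *)uvm_km_alloc_poolpage1(kmem_map, uvmexp.kmem_object,
	    waitok));
}

static void
example_pool_page_free(void *v, unsigned long sz, int mtype)
{

	uvm_km_free_poolpage1(kmem_map, (vaddr_t)v);
}
#endif /* example only */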