uvm_km.c revision 1.27
/*	$NetBSD: uvm_km.c,v 1.27 1999/06/04 23:38:41 thorpej Exp $	*/

/*
 * Copyright (c) 1997 Charles D. Cranor and Washington University.
 * Copyright (c) 1991, 1993, The Regents of the University of California.
 *
 * All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * The Mach Operating System project at Carnegie-Mellon University.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by Charles D. Cranor,
 *	Washington University, the University of California, Berkeley and
 *	its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)vm_kern.c   8.3 (Berkeley) 1/12/94
 * from: Id: uvm_km.c,v 1.1.2.14 1998/02/06 05:19:27 chs Exp
 *
 *
 * Copyright (c) 1987, 1990 Carnegie-Mellon University.
 * All rights reserved.
 *
 * Permission to use, copy, modify and distribute this software and
 * its documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND
 * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie the
 * rights to redistribute these changes.
 */

#include "opt_uvmhist.h"
#include "opt_pmap_new.h"

/*
 * uvm_km.c: handle kernel memory allocation and management
 */

/*
 * overview of kernel memory management:
 *
 * the kernel virtual address space is mapped by "kernel_map."   kernel_map
 * starts at VM_MIN_KERNEL_ADDRESS and goes to VM_MAX_KERNEL_ADDRESS.
 * note that VM_MIN_KERNEL_ADDRESS is equal to vm_map_min(kernel_map).
 *
 * the kernel_map has several "submaps."   submaps can only appear in
 * the kernel_map (user processes can't use them).   submaps "take over"
 * the management of a sub-range of the kernel's address space.  submaps
 * are typically allocated at boot time and are never released.   kernel
 * virtual address space that is mapped by a submap is locked by the
 * submap's lock -- not the kernel_map's lock.
 *
 * thus, the useful feature of submaps is that they allow us to break
 * up the locking and protection of the kernel address space into smaller
 * chunks.
 *
 * the vm system has several standard kernel submaps, including:
 *   kmem_map => contains only wired kernel memory for the kernel
 *		malloc.   *** access to kmem_map must be protected
 *		by splimp() because we are allowed to call malloc()
 *		at interrupt time ***
 *   mb_map => memory for large mbufs,  *** protected by splimp ***
 *   pager_map => used to map "buf" structures into kernel space
 *   exec_map => used during exec to handle exec args
 *   etc...
 *
 * the kernel allocates its private memory out of special uvm_objects whose
 * reference count is set to UVM_OBJ_KERN (thus indicating that the objects
 * are "special" and never die).   all kernel objects should be thought of
 * as large, fixed-sized, sparsely populated uvm_objects.   the size of
 * each kernel object is equal to the size of the kernel virtual address
 * space (i.e. the value "VM_MAX_KERNEL_ADDRESS - VM_MIN_KERNEL_ADDRESS").
 *
 * most kernel private memory lives in kernel_object.   the only exception
 * to this is for memory that belongs to submaps that must be protected
 * by splimp().   each of these submaps has its own private kernel
 * object (e.g. kmem_object, mb_object).
 *
 * note that just because a kernel object spans the entire kernel virtual
 * address space doesn't mean that it has to be mapped into the entire space.
 * large chunks of a kernel object's space go unused either because
 * that area of kernel VM is unmapped, or there is some other type of
 * object mapped into that range (e.g. a vnode).    for a submap's kernel
 * object, the only part of the object that can ever be populated is the
 * offsets that are managed by the submap.
 *
 * note that the "offset" in a kernel object is always the kernel virtual
 * address minus the VM_MIN_KERNEL_ADDRESS (aka vm_map_min(kernel_map)).
 * example:
 *   suppose VM_MIN_KERNEL_ADDRESS is 0xf8000000 and the kernel does a
 *   uvm_km_alloc(kernel_map, PAGE_SIZE) [allocate 1 wired down page in the
 *   kernel map].    if uvm_km_alloc returns virtual address 0xf8235000,
 *   then that means that the page at offset 0x235000 in kernel_object is
 *   mapped at 0xf8235000.
 *
 * note that the offsets in kmem_object and mb_object also follow this
 * rule.   this means that the offsets for kmem_object must fall in the
 * range of [vm_map_min(kmem_map) - vm_map_min(kernel_map)] to
 * [vm_map_max(kmem_map) - vm_map_min(kernel_map)], so the offsets
 * in those objects will typically not start at zero.
 *
 * kernel objects have one other special property: when the kernel virtual
 * memory mapping them is unmapped, the backing memory in the object is
 * freed right away.   this is done with the uvm_km_pgremove() function.
 * this has to be done because there is no backing store for kernel pages
 * and no need to save them after they are no longer referenced.
 */
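/*
 * illustrative sketch (editor's example, never compiled): the offset rule
 * above expressed in code.  a kernel virtual address is converted to a
 * kernel_object offset by subtracting vm_map_min(kernel_map), and back
 * again by adding it.  the local names "kva" and "uoffset" are made up
 * for this example.
 */
#if 0
	vaddr_t kva, uoffset;

	kva = uvm_km_alloc(kernel_map, PAGE_SIZE);	/* e.g. 0xf8235000 */
	uoffset = kva - vm_map_min(kernel_map);		/* e.g.   0x235000 */
	/* the page at offset "uoffset" in kernel_object now backs "kva" */
#endif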

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/proc.h>

#include <vm/vm.h>
#include <vm/vm_page.h>
#include <vm/vm_kern.h>

#include <uvm/uvm.h>

/*
 * global data structures
 */

vm_map_t kernel_map = NULL;

struct vmi_list vmi_list;
simple_lock_data_t vmi_list_slock;

/*
 * local functions
 */

static int uvm_km_get __P((struct uvm_object *, vaddr_t,
	vm_page_t *, int *, int, vm_prot_t, int, int));

/*
 * local data structures
 */

static struct vm_map		kernel_map_store;
static struct uvm_object	kmem_object_store;
static struct uvm_object	mb_object_store;

static struct uvm_pagerops km_pager = {
	NULL,		/* init */
	NULL,		/* reference */
	NULL,		/* detach */
	NULL,		/* fault */
	NULL,		/* flush */
	uvm_km_get,	/* get */
	/* ... rest are NULL */
};

/*
 * uvm_km_get: pager get function for kernel objects
 *
 * => currently we do not support pageout to the swap area, so this
 *    pager is very simple.   eventually we may want an anonymous
 *    object pager which will do paging.
 * => XXXCDC: this pager should be phased out in favor of the aobj pager
 */

static int
uvm_km_get(uobj, offset, pps, npagesp, centeridx, access_type, advice, flags)
	struct uvm_object *uobj;
	vaddr_t offset;
	struct vm_page **pps;
	int *npagesp;
	int centeridx, advice, flags;
	vm_prot_t access_type;
{
	vaddr_t current_offset;
	vm_page_t ptmp;
	int lcv, gotpages, maxpages;
	boolean_t done;
	UVMHIST_FUNC("uvm_km_get"); UVMHIST_CALLED(maphist);

	UVMHIST_LOG(maphist, "flags=%d", flags,0,0,0);

	/*
	 * get number of pages
	 */

	maxpages = *npagesp;

	/*
	 * step 1: handle the case where fault data structures are locked.
	 */

	if (flags & PGO_LOCKED) {

		/*
		 * step 1a: get pages that are already resident.   only do
		 * this if the data structures are locked (i.e. the first time
		 * through).
		 */

		done = TRUE;	/* be optimistic */
		gotpages = 0;	/* # of pages we got so far */

		for (lcv = 0, current_offset = offset ;
		    lcv < maxpages ; lcv++, current_offset += PAGE_SIZE) {

			/* do we care about this page?  if not, skip it */
			if (pps[lcv] == PGO_DONTCARE)
				continue;

			/* lookup page */
			ptmp = uvm_pagelookup(uobj, current_offset);

			/* null?  attempt to allocate the page */
			if (ptmp == NULL) {
				ptmp = uvm_pagealloc(uobj, current_offset,
				    NULL, 0);
				if (ptmp) {
					/* new page */
					ptmp->flags &= ~(PG_BUSY|PG_FAKE);
					UVM_PAGE_OWN(ptmp, NULL);
					uvm_pagezero(ptmp);
				}
			}

			/*
			 * to be useful must get a non-busy, non-released page
			 */
			if (ptmp == NULL ||
			    (ptmp->flags & (PG_BUSY|PG_RELEASED)) != 0) {
				if (lcv == centeridx ||
				    (flags & PGO_ALLPAGES) != 0)
					/* need to do a wait or I/O! */
					done = FALSE;
				continue;
			}

			/*
			 * useful page: busy/lock it and plug it in our
			 * result array
			 */

			/* caller must un-busy this page */
			ptmp->flags |= PG_BUSY;
			UVM_PAGE_OWN(ptmp, "uvm_km_get1");
			pps[lcv] = ptmp;
			gotpages++;

		}	/* "for" lcv loop */

		/*
		 * step 1b: now we've either done everything needed or we
		 * need to unlock and do some waiting or I/O.
		 */

		UVMHIST_LOG(maphist, "<- done (done=%d)", done, 0,0,0);

		*npagesp = gotpages;
		if (done)
			return(VM_PAGER_OK);		/* bingo! */
		else
			return(VM_PAGER_UNLOCK);	/* EEK!   Need to
							 * unlock and I/O */
	}

	/*
	 * step 2: get non-resident or busy pages.
	 * object is locked.   data structures are unlocked.
	 */

	for (lcv = 0, current_offset = offset ;
	    lcv < maxpages ; lcv++, current_offset += PAGE_SIZE) {

		/* skip over pages we've already gotten or don't want */
		/* skip over pages we don't _have_ to get */
		if (pps[lcv] != NULL ||
		    (lcv != centeridx && (flags & PGO_ALLPAGES) == 0))
			continue;

		/*
		 * we have yet to locate the current page (pps[lcv]).   we
		 * first look for a page that is already at the current offset.
		 * if we find a page, we check to see if it is busy or
		 * released.  if that is the case, then we sleep on the page
		 * until it is no longer busy or released and repeat the
		 * lookup.    if the page we found is neither busy nor
		 * released, then we busy it (so we own it) and plug it into
		 * pps[lcv].   this 'break's the following while loop and
		 * indicates we are ready to move on to the next page in the
		 * "lcv" loop above.
		 *
		 * if we exit the while loop with pps[lcv] still set to NULL,
		 * then it means that we allocated a new busy/fake/clean page
		 * ptmp in the object and we need to do I/O to fill in the
		 * data.
		 */

		while (pps[lcv] == NULL) {	/* top of "pps" while loop */

			/* look for a current page */
			ptmp = uvm_pagelookup(uobj, current_offset);

			/* nope?   allocate one now (if we can) */
			if (ptmp == NULL) {

				ptmp = uvm_pagealloc(uobj, current_offset,
				    NULL, 0);

				/* out of RAM? */
				if (ptmp == NULL) {
					simple_unlock(&uobj->vmobjlock);
					uvm_wait("kmgetwait1");
					simple_lock(&uobj->vmobjlock);
					/* goto top of pps while loop */
					continue;
				}

				/*
				 * got new page ready for I/O.  break pps
				 * while loop.  pps[lcv] is still NULL.
				 */
				break;
			}

			/* page is there, see if we need to wait on it */
			if ((ptmp->flags & (PG_BUSY|PG_RELEASED)) != 0) {
				ptmp->flags |= PG_WANTED;
				UVM_UNLOCK_AND_WAIT(ptmp, &uobj->vmobjlock, 0,
				    "uvn_get", 0);
				simple_lock(&uobj->vmobjlock);
				continue;	/* goto top of pps while loop */
			}

			/*
			 * if we get here then the page has become resident
			 * and unbusy between steps 1 and 2.  we busy it now
			 * (so we own it) and set pps[lcv] (so that we exit
			 * the while loop).  caller must un-busy.
			 */
			ptmp->flags |= PG_BUSY;
			UVM_PAGE_OWN(ptmp, "uvm_km_get2");
			pps[lcv] = ptmp;
		}

		/*
		 * if we own a valid page at the correct offset, pps[lcv]
		 * will point to it.   nothing more to do except go to the
		 * next page.
		 */

		if (pps[lcv])
			continue;	/* next lcv */

		/*
		 * we have a "fake/busy/clean" page that we just allocated.
		 * do the needed "i/o" (in this case that means zero it).
		 */

		uvm_pagezero(ptmp);
		ptmp->flags &= ~(PG_FAKE);
		pps[lcv] = ptmp;

	}	/* lcv loop */

	/*
	 * finally, unlock object and return.
	 */

	simple_unlock(&uobj->vmobjlock);
	UVMHIST_LOG(maphist, "<- done (OK)",0,0,0,0);
	return(VM_PAGER_OK);
}

/*
 * uvm_km_init: init kernel maps and objects to reflect reality (i.e.
 * KVM already allocated for text, data, bss, and static data structures).
 *
 * => KVM is defined by VM_MIN_KERNEL_ADDRESS/VM_MAX_KERNEL_ADDRESS.
 *    we assume that [min -> start] has already been allocated and that
 *    "end" is the end.
 */

void
uvm_km_init(start, end)
	vaddr_t start, end;
{
	vaddr_t base = VM_MIN_KERNEL_ADDRESS;

	/*
	 * first, initialize the interrupt-safe map list.
	 */
	LIST_INIT(&vmi_list);
	simple_lock_init(&vmi_list_slock);

	/*
	 * next, init kernel memory objects.
	 */

	/* kernel_object: for pageable anonymous kernel memory */
	uvm.kernel_object = uao_create(VM_MAX_KERNEL_ADDRESS -
	    VM_MIN_KERNEL_ADDRESS, UAO_FLAG_KERNOBJ);

	/*
	 * kmem_object: for use by the kernel malloc().  Memory is always
	 * wired, and this object (and the kmem_map) can be accessed at
	 * interrupt time.
	 */
	simple_lock_init(&kmem_object_store.vmobjlock);
	kmem_object_store.pgops = &km_pager;
	TAILQ_INIT(&kmem_object_store.memq);
	kmem_object_store.uo_npages = 0;
	/* we are special.  we never die */
	kmem_object_store.uo_refs = UVM_OBJ_KERN_INTRSAFE;
	uvmexp.kmem_object = &kmem_object_store;

	/*
	 * mb_object: for mbuf cluster pages on platforms which use the
	 * mb_map.  Memory is always wired, and this object (and the mb_map)
	 * can be accessed at interrupt time.
	 */
	simple_lock_init(&mb_object_store.vmobjlock);
	mb_object_store.pgops = &km_pager;
	TAILQ_INIT(&mb_object_store.memq);
	mb_object_store.uo_npages = 0;
	/* we are special.  we never die */
	mb_object_store.uo_refs = UVM_OBJ_KERN_INTRSAFE;
	uvmexp.mb_object = &mb_object_store;

	/*
	 * init the map and reserve already allocated kernel space
	 * before installing.
	 */

	uvm_map_setup(&kernel_map_store, base, end, VM_MAP_PAGEABLE);
	kernel_map_store.pmap = pmap_kernel();
	if (uvm_map(&kernel_map_store, &base, start - base, NULL,
	    UVM_UNKNOWN_OFFSET, UVM_MAPFLAG(UVM_PROT_ALL, UVM_PROT_ALL,
	    UVM_INH_NONE, UVM_ADV_RANDOM, UVM_FLAG_FIXED)) != KERN_SUCCESS)
		panic("uvm_km_init: could not reserve space for kernel");

	/*
	 * install!
	 */

	kernel_map = &kernel_map_store;
}

/*
 * uvm_km_suballoc: allocate a submap in the kernel map.   once a submap
 * is allocated all references to that area of VM must go through it.  this
 * allows the locking of VAs in kernel_map to be broken up into regions.
 *
 * => if `fixed' is true, *min specifies where the region described
 *    by the submap must start
 * => if submap is non NULL we use that as the submap, otherwise we
 *    alloc a new map
 */
struct vm_map *
uvm_km_suballoc(map, min, max, size, flags, fixed, submap)
	struct vm_map *map;
	vaddr_t *min, *max;		/* OUT, OUT */
	vsize_t size;
	int flags;
	boolean_t fixed;
	struct vm_map *submap;
{
	int mapflags = UVM_FLAG_NOMERGE | (fixed ? UVM_FLAG_FIXED : 0);

	size = round_page(size);	/* round up to pagesize */

	/*
	 * first allocate a blank spot in the parent map
	 */

	if (uvm_map(map, min, size, NULL, UVM_UNKNOWN_OFFSET,
	    UVM_MAPFLAG(UVM_PROT_ALL, UVM_PROT_ALL, UVM_INH_NONE,
	    UVM_ADV_RANDOM, mapflags)) != KERN_SUCCESS) {
		panic("uvm_km_suballoc: unable to allocate space in parent map");
	}

	/*
	 * set VM bounds (min is filled in by uvm_map)
	 */

	*max = *min + size;

	/*
	 * add references to pmap and create or init the submap
	 */

	pmap_reference(vm_map_pmap(map));
	if (submap == NULL) {
		submap = uvm_map_create(vm_map_pmap(map), *min, *max, flags);
		if (submap == NULL)
			panic("uvm_km_suballoc: unable to create submap");
	} else {
		uvm_map_setup(submap, *min, *max, flags);
		submap->pmap = vm_map_pmap(map);
	}

	/*
	 * now let uvm_map_submap plug it in...
	 */

	if (uvm_map_submap(map, *min, *max, submap) != KERN_SUCCESS)
		panic("uvm_km_suballoc: submap allocation failed");

	return(submap);
}
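/*
 * illustrative sketch (editor's example, never compiled): how boot-time
 * code typically carves a submap out of kernel_map with uvm_km_suballoc().
 * the submap storage, the size, and the variable names here are made up
 * for this example; real callers choose flags (e.g. pageable vs. intrsafe)
 * to match the submap's use.
 */
#if 0
	static struct vm_map example_map_store;
	vm_map_t example_map;
	vaddr_t minaddr, maxaddr;

	/* reserve 16 pages of KVA in kernel_map; the bounds are returned */
	example_map = uvm_km_suballoc(kernel_map, &minaddr, &maxaddr,
	    16 * PAGE_SIZE, VM_MAP_PAGEABLE, FALSE, &example_map_store);
#endif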
/*
 * uvm_km_pgremove: remove pages from a kernel uvm_object.
 *
 * => when you unmap a part of anonymous kernel memory you want to toss
 *    the pages right away.    (this gets called from uvm_unmap_...).
 */

#define UKM_HASH_PENALTY 4      /* a guess */

void
uvm_km_pgremove(uobj, start, end)
	struct uvm_object *uobj;
	vaddr_t start, end;
{
	boolean_t by_list;
	struct vm_page *pp, *ppnext;
	vaddr_t curoff;
	UVMHIST_FUNC("uvm_km_pgremove"); UVMHIST_CALLED(maphist);

	simple_lock(&uobj->vmobjlock);		/* lock object */

#ifdef DIAGNOSTIC
	if (uobj->pgops != &aobj_pager)
		panic("uvm_km_pgremove: object %p not an aobj", uobj);
#endif

	/* choose cheapest traversal */
	by_list = (uobj->uo_npages <=
	    ((end - start) >> PAGE_SHIFT) * UKM_HASH_PENALTY);

	if (by_list)
		goto loop_by_list;

	/* by hash */

	for (curoff = start ; curoff < end ; curoff += PAGE_SIZE) {
		pp = uvm_pagelookup(uobj, curoff);
		if (pp == NULL)
			continue;

		UVMHIST_LOG(maphist,"  page 0x%x, busy=%d", pp,
		    pp->flags & PG_BUSY, 0, 0);

		/* now do the actual work */
		if (pp->flags & PG_BUSY) {
			/* owner must check for this when done */
			pp->flags |= PG_RELEASED;
		} else {
			/* free the swap slot... */
			uao_dropswap(uobj, curoff >> PAGE_SHIFT);

			/*
			 * ...and free the page; note it may be on the
			 * active or inactive queues.
			 */
			uvm_lock_pageq();
			uvm_pagefree(pp);
			uvm_unlock_pageq();
		}
		/* done */
	}
	simple_unlock(&uobj->vmobjlock);
	return;

loop_by_list:

	for (pp = uobj->memq.tqh_first ; pp != NULL ; pp = ppnext) {
		ppnext = pp->listq.tqe_next;
		if (pp->offset < start || pp->offset >= end) {
			continue;
		}

		UVMHIST_LOG(maphist,"  page 0x%x, busy=%d", pp,
		    pp->flags & PG_BUSY, 0, 0);

		/* now do the actual work */
		if (pp->flags & PG_BUSY) {
			/* owner must check for this when done */
			pp->flags |= PG_RELEASED;
		} else {
			/* free the swap slot... */
			uao_dropswap(uobj, pp->offset >> PAGE_SHIFT);

			/*
			 * ...and free the page; note it may be on the
			 * active or inactive queues.
			 */
			uvm_lock_pageq();
			uvm_pagefree(pp);
			uvm_unlock_pageq();
		}
		/* done */
	}
	simple_unlock(&uobj->vmobjlock);
	return;
}


/*
 * uvm_km_pgremove_intrsafe: like uvm_km_pgremove(), but for "intrsafe"
 * objects
 *
 * => when you unmap a part of anonymous kernel memory you want to toss
 *    the pages right away.    (this gets called from uvm_unmap_...).
 * => none of the pages will ever be busy, and none of them will ever
 *    be on the active or inactive queues (because these objects are
 *    never allowed to "page").
 */

void
uvm_km_pgremove_intrsafe(uobj, start, end)
	struct uvm_object *uobj;
	vaddr_t start, end;
{
	boolean_t by_list;
	struct vm_page *pp, *ppnext;
	vaddr_t curoff;
	UVMHIST_FUNC("uvm_km_pgremove_intrsafe"); UVMHIST_CALLED(maphist);

	simple_lock(&uobj->vmobjlock);		/* lock object */

#ifdef DIAGNOSTIC
	if (UVM_OBJ_IS_INTRSAFE_OBJECT(uobj) == 0)
		panic("uvm_km_pgremove_intrsafe: object %p not intrsafe", uobj);
#endif

	/* choose cheapest traversal */
	by_list = (uobj->uo_npages <=
	    ((end - start) >> PAGE_SHIFT) * UKM_HASH_PENALTY);

	if (by_list)
		goto loop_by_list;

	/* by hash */

	for (curoff = start ; curoff < end ; curoff += PAGE_SIZE) {
		pp = uvm_pagelookup(uobj, curoff);
		if (pp == NULL)
			continue;

		UVMHIST_LOG(maphist,"  page 0x%x, busy=%d", pp,
		    pp->flags & PG_BUSY, 0, 0);
#ifdef DIAGNOSTIC
		if (pp->flags & PG_BUSY)
			panic("uvm_km_pgremove_intrsafe: busy page");
		if (pp->pqflags & PQ_ACTIVE)
			panic("uvm_km_pgremove_intrsafe: active page");
		if (pp->pqflags & PQ_INACTIVE)
			panic("uvm_km_pgremove_intrsafe: inactive page");
#endif

		/* free the page */
		uvm_pagefree(pp);
	}
	simple_unlock(&uobj->vmobjlock);
	return;

loop_by_list:

	for (pp = uobj->memq.tqh_first ; pp != NULL ; pp = ppnext) {
		ppnext = pp->listq.tqe_next;
		if (pp->offset < start || pp->offset >= end) {
			continue;
		}

		UVMHIST_LOG(maphist,"  page 0x%x, busy=%d", pp,
		    pp->flags & PG_BUSY, 0, 0);

#ifdef DIAGNOSTIC
		if (pp->flags & PG_BUSY)
			panic("uvm_km_pgremove_intrsafe: busy page");
		if (pp->pqflags & PQ_ACTIVE)
			panic("uvm_km_pgremove_intrsafe: active page");
		if (pp->pqflags & PQ_INACTIVE)
			panic("uvm_km_pgremove_intrsafe: inactive page");
#endif

		/* free the page */
		uvm_pagefree(pp);
	}
	simple_unlock(&uobj->vmobjlock);
	return;
}


/*
 * uvm_km_kmemalloc: lower level kernel memory allocator for malloc()
 *
 * => we map wired memory into the specified map using the obj passed in
 * => NOTE: we can return NULL even if we can wait if there is not enough
 *	free VM space in the map... caller should be prepared to handle
 *	this case.
 * => we return KVA of memory allocated
 * => flags: NOWAIT, VALLOC - just allocate VA, TRYLOCK - fail if we can't
 *	lock the map
 */

vaddr_t
uvm_km_kmemalloc(map, obj, size, flags)
	vm_map_t map;
	struct uvm_object *obj;
	vsize_t size;
	int flags;
{
	vaddr_t kva, loopva;
	vaddr_t offset;
	struct vm_page *pg;
	UVMHIST_FUNC("uvm_km_kmemalloc"); UVMHIST_CALLED(maphist);


	UVMHIST_LOG(maphist,"  (map=0x%x, obj=0x%x, size=0x%x, flags=%d)",
	    map, obj, size, flags);
#ifdef DIAGNOSTIC
	/* sanity check */
	if (vm_map_pmap(map) != pmap_kernel())
		panic("uvm_km_kmemalloc: invalid map");
#endif

	/*
	 * setup for call
	 */

	size = round_page(size);
	kva = vm_map_min(map);	/* hint */

	/*
	 * allocate some virtual space
	 */

	if (uvm_map(map, &kva, size, obj, UVM_UNKNOWN_OFFSET,
	    UVM_MAPFLAG(UVM_PROT_ALL, UVM_PROT_ALL, UVM_INH_NONE,
	    UVM_ADV_RANDOM, (flags & UVM_KMF_TRYLOCK)))
	    != KERN_SUCCESS) {
		UVMHIST_LOG(maphist, "<- done (no VM)",0,0,0,0);
		return(0);
	}

	/*
	 * if all we wanted was VA, return now
	 */

	if (flags & UVM_KMF_VALLOC) {
		UVMHIST_LOG(maphist,"<- done valloc (kva=0x%x)", kva,0,0,0);
		return(kva);
	}

	/*
	 * recover object offset from virtual address
	 */

	offset = kva - vm_map_min(kernel_map);
	UVMHIST_LOG(maphist, "  kva=0x%x, offset=0x%x", kva, offset,0,0);

	/*
	 * now allocate and map in the memory... note that we are the only ones
	 * who should ever get a handle on this area of VM.
	 */

	loopva = kva;
	while (size) {
		simple_lock(&obj->vmobjlock);
		pg = uvm_pagealloc(obj, offset, NULL, 0);
		if (pg) {
			pg->flags &= ~PG_BUSY;	/* new page */
			UVM_PAGE_OWN(pg, NULL);
		}
		simple_unlock(&obj->vmobjlock);

		/*
		 * out of memory?
		 */

		if (pg == NULL) {
			if (flags & UVM_KMF_NOWAIT) {
				/* free everything! */
				uvm_unmap(map, kva, kva + size);
				return(0);
			} else {
				uvm_wait("km_getwait2");	/* sleep here */
				continue;
			}
		}

		/*
		 * map it in: note that we call pmap_enter with the map and
		 * object unlocked in case we are kmem_map/kmem_object
		 * (because if pmap_enter wants to allocate out of kmem_object
		 * it will need to lock it itself!)
		 */
		if (UVM_OBJ_IS_INTRSAFE_OBJECT(obj)) {
#if defined(PMAP_NEW)
			pmap_kenter_pa(loopva, VM_PAGE_TO_PHYS(pg),
			    VM_PROT_ALL);
#else
			pmap_enter(map->pmap, loopva, VM_PAGE_TO_PHYS(pg),
			    UVM_PROT_ALL, TRUE, VM_PROT_READ|VM_PROT_WRITE);
#endif
		} else {
			pmap_enter(map->pmap, loopva, VM_PAGE_TO_PHYS(pg),
			    UVM_PROT_ALL, TRUE, VM_PROT_READ|VM_PROT_WRITE);
		}
		loopva += PAGE_SIZE;
		offset += PAGE_SIZE;
		size -= PAGE_SIZE;
	}

	UVMHIST_LOG(maphist,"<- done (kva=0x%x)", kva,0,0,0);
	return(kva);
}

/*
 * uvm_km_free: free an area of kernel memory
 */

void
uvm_km_free(map, addr, size)
	vm_map_t map;
	vaddr_t addr;
	vsize_t size;
{

	uvm_unmap(map, trunc_page(addr), round_page(addr+size));
}
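/*
 * illustrative sketch (editor's example, never compiled): a wired
 * allocation from an interrupt-safe submap in the style of the kernel
 * malloc(), paired with the matching free.  "kmem_map" is the submap
 * described in the overview; the local names are made up for this example.
 */
#if 0
	vaddr_t va;
	int s;

	s = splimp();		/* kmem_map may also be used from interrupts */
	va = uvm_km_kmemalloc(kmem_map, uvmexp.kmem_object, PAGE_SIZE,
	    UVM_KMF_NOWAIT);
	splx(s);
	if (va == 0)
		return (NULL);	/* no VA or no physical memory right now */

	/* ... use the wired page at "va" ... */

	s = splimp();
	uvm_km_free(kmem_map, va, PAGE_SIZE);
	splx(s);
#endif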
/*
 * uvm_km_free_wakeup: free an area of kernel memory and wake up
 * anyone waiting for vm space.
 *
 * => XXX: "wanted" bit + unlock&wait on other end?
 */

void
uvm_km_free_wakeup(map, addr, size)
	vm_map_t map;
	vaddr_t addr;
	vsize_t size;
{
	vm_map_entry_t dead_entries;

	vm_map_lock(map);
	(void)uvm_unmap_remove(map, trunc_page(addr), round_page(addr+size),
	    &dead_entries);
	thread_wakeup(map);
	vm_map_unlock(map);

	if (dead_entries != NULL)
		uvm_unmap_detach(dead_entries, 0);
}

/*
 * uvm_km_alloc1: allocate wired down memory in the kernel map.
 *
 * => we can sleep if needed
 */

vaddr_t
uvm_km_alloc1(map, size, zeroit)
	vm_map_t map;
	vsize_t size;
	boolean_t zeroit;
{
	vaddr_t kva, loopva, offset;
	struct vm_page *pg;
	UVMHIST_FUNC("uvm_km_alloc1"); UVMHIST_CALLED(maphist);

	UVMHIST_LOG(maphist,"(map=0x%x, size=0x%x)", map, size,0,0);

#ifdef DIAGNOSTIC
	if (vm_map_pmap(map) != pmap_kernel())
		panic("uvm_km_alloc1");
#endif

	size = round_page(size);
	kva = vm_map_min(map);		/* hint */

	/*
	 * allocate some virtual space
	 */

	if (uvm_map(map, &kva, size, uvm.kernel_object, UVM_UNKNOWN_OFFSET,
	    UVM_MAPFLAG(UVM_PROT_ALL, UVM_PROT_ALL, UVM_INH_NONE,
	    UVM_ADV_RANDOM, 0)) != KERN_SUCCESS) {
		UVMHIST_LOG(maphist,"<- done (no VM)",0,0,0,0);
		return(0);
	}

	/*
	 * recover object offset from virtual address
	 */

	offset = kva - vm_map_min(kernel_map);
	UVMHIST_LOG(maphist,"  kva=0x%x, offset=0x%x", kva, offset,0,0);

	/*
	 * now allocate the memory.  we must be careful about released pages.
	 */

	loopva = kva;
	while (size) {
		simple_lock(&uvm.kernel_object->vmobjlock);
		pg = uvm_pagelookup(uvm.kernel_object, offset);

		/*
		 * if we found a page in an unallocated region, it must be
		 * released
		 */
		if (pg) {
			if ((pg->flags & PG_RELEASED) == 0)
				panic("uvm_km_alloc1: non-released page");
			pg->flags |= PG_WANTED;
			UVM_UNLOCK_AND_WAIT(pg, &uvm.kernel_object->vmobjlock,
			    0, "km_alloc", 0);
			continue;	/* retry */
		}

		/* allocate ram */
		pg = uvm_pagealloc(uvm.kernel_object, offset, NULL, 0);
		if (pg) {
			pg->flags &= ~PG_BUSY;	/* new page */
			UVM_PAGE_OWN(pg, NULL);
		}
		simple_unlock(&uvm.kernel_object->vmobjlock);
		if (pg == NULL) {
			uvm_wait("km_alloc1w");	/* wait for memory */
			continue;
		}

		/*
		 * map it in; note we're never called with an intrsafe
		 * object, so we always use regular old pmap_enter().
		 */
		pmap_enter(map->pmap, loopva, VM_PAGE_TO_PHYS(pg),
		    UVM_PROT_ALL, TRUE, VM_PROT_READ|VM_PROT_WRITE);

		loopva += PAGE_SIZE;
		offset += PAGE_SIZE;
		size -= PAGE_SIZE;
	}

	/*
	 * zero on request (note that "size" is now zero due to the above loop
	 * so we need to subtract kva from loopva to reconstruct the size).
	 */

	if (zeroit)
		memset((caddr_t)kva, 0, loopva - kva);

	UVMHIST_LOG(maphist,"<- done (kva=0x%x)", kva,0,0,0);
	return(kva);
}
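/*
 * illustrative sketch (editor's example, never compiled): allocating
 * wired, zeroed kernel memory with uvm_km_alloc1().  this can sleep for
 * both VA and physical pages, so it is only usable from process context.
 * the size and the local name are made up for this example.
 */
#if 0
	vaddr_t va;

	va = uvm_km_alloc1(kernel_map, 4 * PAGE_SIZE, TRUE);	/* zeroit */
	if (va == 0)
		panic("example: out of kernel virtual space");
#endif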
/*
 * uvm_km_valloc: allocate zero-fill memory in the kernel's address space
 *
 * => memory is not allocated until fault time
 */

vaddr_t
uvm_km_valloc(map, size)
	vm_map_t map;
	vsize_t size;
{
	vaddr_t kva;
	UVMHIST_FUNC("uvm_km_valloc"); UVMHIST_CALLED(maphist);

	UVMHIST_LOG(maphist, "(map=0x%x, size=0x%x)", map, size, 0,0);

#ifdef DIAGNOSTIC
	if (vm_map_pmap(map) != pmap_kernel())
		panic("uvm_km_valloc");
#endif

	size = round_page(size);
	kva = vm_map_min(map);		/* hint */

	/*
	 * allocate some virtual space.  will be demand filled by kernel_object.
	 */

	if (uvm_map(map, &kva, size, uvm.kernel_object, UVM_UNKNOWN_OFFSET,
	    UVM_MAPFLAG(UVM_PROT_ALL, UVM_PROT_ALL, UVM_INH_NONE,
	    UVM_ADV_RANDOM, 0)) != KERN_SUCCESS) {
		UVMHIST_LOG(maphist, "<- done (no VM)", 0,0,0,0);
		return(0);
	}

	UVMHIST_LOG(maphist, "<- done (kva=0x%x)", kva,0,0,0);
	return(kva);
}

/*
 * uvm_km_valloc_wait: allocate zero-fill memory in the kernel's address space
 *
 * => memory is not allocated until fault time
 * => if no room in map, wait for space to free, unless requested size
 *    is larger than map (in which case we return 0)
 */

vaddr_t
uvm_km_valloc_wait(map, size)
	vm_map_t map;
	vsize_t size;
{
	vaddr_t kva;
	UVMHIST_FUNC("uvm_km_valloc_wait"); UVMHIST_CALLED(maphist);

	UVMHIST_LOG(maphist, "(map=0x%x, size=0x%x)", map, size, 0,0);

#ifdef DIAGNOSTIC
	if (vm_map_pmap(map) != pmap_kernel())
		panic("uvm_km_valloc_wait");
#endif

	size = round_page(size);
	if (size > vm_map_max(map) - vm_map_min(map))
		return(0);

	while (1) {
		kva = vm_map_min(map);		/* hint */

		/*
		 * allocate some virtual space.   will be demand filled
		 * by kernel_object.
		 */

		if (uvm_map(map, &kva, size, uvm.kernel_object,
		    UVM_UNKNOWN_OFFSET, UVM_MAPFLAG(UVM_PROT_ALL,
		    UVM_PROT_ALL, UVM_INH_NONE, UVM_ADV_RANDOM, 0))
		    == KERN_SUCCESS) {
			UVMHIST_LOG(maphist,"<- done (kva=0x%x)", kva,0,0,0);
			return(kva);
		}

		/*
		 * failed.  sleep for a while (on map)
		 */

		UVMHIST_LOG(maphist,"<<<sleeping>>>",0,0,0,0);
		tsleep((caddr_t)map, PVM, "vallocwait", 0);
	}
	/*NOTREACHED*/
}

/* Sanity; must specify both or none. */
#if (defined(PMAP_MAP_POOLPAGE) || defined(PMAP_UNMAP_POOLPAGE)) && \
    (!defined(PMAP_MAP_POOLPAGE) || !defined(PMAP_UNMAP_POOLPAGE))
#error Must specify MAP and UNMAP together.
#endif
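/*
 * illustrative sketch (editor's example, never compiled): the difference
 * between the two zero-fill allocators above.  uvm_km_valloc() returns 0
 * when the map has no room; uvm_km_valloc_wait() sleeps until space frees
 * up, unless the request is larger than the whole map.  "pager_map" is the
 * submap named in the overview; the sizes and local names are made up.
 */
#if 0
	vaddr_t va1, va2;

	va1 = uvm_km_valloc(kernel_map, 16 * PAGE_SIZE);	/* may be 0 */
	va2 = uvm_km_valloc_wait(pager_map, 16 * PAGE_SIZE);	/* sleeps */
#endif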
/*
 * uvm_km_alloc_poolpage: allocate a page for the pool allocator
 *
 * => if the pmap specifies an alternate mapping method, we use it.
 */

/* ARGSUSED */
vaddr_t
uvm_km_alloc_poolpage1(map, obj, waitok)
	vm_map_t map;
	struct uvm_object *obj;
	boolean_t waitok;
{
#if defined(PMAP_MAP_POOLPAGE)
	struct vm_page *pg;
	vaddr_t va;

 again:
	pg = uvm_pagealloc(NULL, 0, NULL, 0);
	if (pg == NULL) {
		if (waitok) {
			uvm_wait("plpg");
			goto again;
		} else
			return (0);
	}
	va = PMAP_MAP_POOLPAGE(VM_PAGE_TO_PHYS(pg));
	if (va == 0)
		uvm_pagefree(pg);
	return (va);
#else
	vaddr_t va;
	int s;

	/*
	 * NOTE: We may be called with a map that doesn't require splimp
	 * protection (e.g. kernel_map).  However, it does not hurt to
	 * go to splimp in this case (since unprotected maps will never be
	 * accessed in interrupt context).
	 *
	 * XXX We may want to consider changing the interface to this
	 * XXX function.
	 */

	s = splimp();
	va = uvm_km_kmemalloc(map, obj, PAGE_SIZE, waitok ? 0 : UVM_KMF_NOWAIT);
	splx(s);
	return (va);
#endif /* PMAP_MAP_POOLPAGE */
}

/*
 * uvm_km_free_poolpage: free a previously allocated pool page
 *
 * => if the pmap specifies an alternate unmapping method, we use it.
 */

/* ARGSUSED */
void
uvm_km_free_poolpage1(map, addr)
	vm_map_t map;
	vaddr_t addr;
{
#if defined(PMAP_UNMAP_POOLPAGE)
	paddr_t pa;

	pa = PMAP_UNMAP_POOLPAGE(addr);
	uvm_pagefree(PHYS_TO_VM_PAGE(pa));
#else
	int s;

	/*
	 * NOTE: We may be called with a map that doesn't require splimp
	 * protection (e.g. kernel_map).  However, it does not hurt to
	 * go to splimp in this case (since unprotected maps will never be
	 * accessed in interrupt context).
	 *
	 * XXX We may want to consider changing the interface to this
	 * XXX function.
	 */

	s = splimp();
	uvm_km_free(map, addr, PAGE_SIZE);
	splx(s);
#endif /* PMAP_UNMAP_POOLPAGE */
}
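/*
 * illustrative sketch (editor's example, never compiled): how a pool
 * backend might obtain and release a page with the two functions above.
 * the map/object arguments only matter when the pmap does not provide
 * PMAP_MAP_POOLPAGE/PMAP_UNMAP_POOLPAGE; the names here are made up.
 */
#if 0
	vaddr_t va;

	va = uvm_km_alloc_poolpage1(kmem_map, uvmexp.kmem_object, FALSE);
	if (va == 0)
		return (NULL);		/* try to grow the pool later */

	/* ... hand the page at "va" to the pool ... */

	uvm_km_free_poolpage1(kmem_map, va);
#endif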