/*	$NetBSD: uvm_km.c,v 1.19 1999/03/24 03:45:27 cgd Exp $	*/

/*
 * XXXCDC: "ROUGH DRAFT" QUALITY UVM PRE-RELEASE FILE!
 *         >>>USE AT YOUR OWN RISK, WORK IS NOT FINISHED<<<
 */

/*
 * Copyright (c) 1997 Charles D. Cranor and Washington University.
 * Copyright (c) 1991, 1993, The Regents of the University of California.
 *
 * All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * The Mach Operating System project at Carnegie-Mellon University.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by Charles D. Cranor,
 *	Washington University, the University of California, Berkeley and
 *	its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)vm_kern.c   8.3 (Berkeley) 1/12/94
 * from: Id: uvm_km.c,v 1.1.2.14 1998/02/06 05:19:27 chs Exp
 *
 *
 * Copyright (c) 1987, 1990 Carnegie-Mellon University.
 * All rights reserved.
 *
 * Permission to use, copy, modify and distribute this software and
 * its documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND
 * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie the
 * rights to redistribute these changes.
 */
#include "opt_uvmhist.h"
#include "opt_pmap_new.h"

/*
 * uvm_km.c: handle kernel memory allocation and management
 */

/*
 * overview of kernel memory management:
 *
 * the kernel virtual address space is mapped by "kernel_map."   kernel_map
 * starts at VM_MIN_KERNEL_ADDRESS and goes to VM_MAX_KERNEL_ADDRESS.
 * note that VM_MIN_KERNEL_ADDRESS is equal to vm_map_min(kernel_map).
 *
 * the kernel_map has several "submaps."   submaps can only appear in
 * the kernel_map (user processes can't use them).   submaps "take over"
 * the management of a sub-range of the kernel's address space.   submaps
 * are typically allocated at boot time and are never released.   kernel
 * virtual address space that is mapped by a submap is locked by the
 * submap's lock -- not the kernel_map's lock.
 *
 * thus, the useful feature of submaps is that they allow us to break
 * up the locking and protection of the kernel address space into smaller
 * chunks.
 *
 * the vm system has several standard kernel submaps, including:
 *   kmem_map => contains only wired kernel memory for the kernel
 *		malloc.   *** access to kmem_map must be protected
 *		by splimp() because we are allowed to call malloc()
 *		at interrupt time ***
 *   mb_map => memory for large mbufs,  *** protected by splimp ***
 *   pager_map => used to map "buf" structures into kernel space
 *   exec_map => used during exec to handle exec args
 *   etc...
 *
 * the kernel allocates its private memory out of special uvm_objects whose
 * reference count is set to UVM_OBJ_KERN (thus indicating that the objects
 * are "special" and never die).   all kernel objects should be thought of
 * as large, fixed-sized, sparsely populated uvm_objects.   each kernel
 * object is equal to the size of kernel virtual address space (i.e. the
 * value "VM_MAX_KERNEL_ADDRESS - VM_MIN_KERNEL_ADDRESS").
 *
 * most kernel private memory lives in kernel_object.   the only exception
 * to this is for memory that belongs to submaps that must be protected
 * by splimp().   each of these submaps has its own private kernel
 * object (e.g. kmem_object, mb_object).
 *
 * note that just because a kernel object spans the entire kernel virtual
 * address space doesn't mean that it has to be mapped into the entire space.
 * large chunks of a kernel object's space go unused either because
 * that area of kernel VM is unmapped, or there is some other type of
 * object mapped into that range (e.g. a vnode).   for a submap's kernel
 * object, the only part of the object that can ever be populated is the
 * offsets that are managed by the submap.
 *
 * note that the "offset" in a kernel object is always the kernel virtual
 * address minus the VM_MIN_KERNEL_ADDRESS (aka vm_map_min(kernel_map)).
 * example:
 *   suppose VM_MIN_KERNEL_ADDRESS is 0xf8000000 and the kernel does a
 *   uvm_km_alloc(kernel_map, PAGE_SIZE) [allocate 1 wired down page in the
 *   kernel map].   if uvm_km_alloc returns virtual address 0xf8235000,
 *   then that means that the page at offset 0x235000 in kernel_object is
 *   mapped at 0xf8235000.
 *
 * note that the offsets in kmem_object and mb_object also follow this
 * rule.   this means that the offsets for kmem_object must fall in the
 * range of [vm_map_min(kmem_map) - vm_map_min(kernel_map)] to
 * [vm_map_max(kmem_map) - vm_map_min(kernel_map)], so the offsets
 * in those objects will typically not start at zero.
 *
 * kernel objects have one other special property: when the kernel virtual
 * memory mapping them is unmapped, the backing memory in the object is
 * freed right away.   this is done with the uvm_km_pgremove() function.
 * this has to be done because there is no backing store for kernel pages
 * and no need to save them after they are no longer referenced.
 */
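/*
 * the offset rule above is easy to apply in code.  a minimal sketch
 * follows (not compiled; "kva" is assumed to be a kernel virtual address
 * handed back by uvm_km_alloc on kernel_map):
 */
#if 0
	vaddr_t kva, offset;
	struct vm_page *pg;

	kva = uvm_km_alloc(kernel_map, PAGE_SIZE);	/* e.g. 0xf8235000 */
	/* offset of the backing page within kernel_object */
	offset = kva - vm_map_min(kernel_map);		/* e.g. 0x235000 */
	/* the backing page can now be found with uvm_pagelookup() */
	pg = uvm_pagelookup(uvm.kernel_object, offset);
#endif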
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/proc.h>

#include <vm/vm.h>
#include <vm/vm_page.h>
#include <vm/vm_kern.h>

#include <uvm/uvm.h>

/*
 * global data structures
 */

vm_map_t kernel_map = NULL;

/*
 * local functions
 */

static int uvm_km_get __P((struct uvm_object *, vaddr_t,
	vm_page_t *, int *, int, vm_prot_t, int, int));

/*
 * local data structures
 */

static struct vm_map		kernel_map_store;
static struct uvm_object	kmem_object_store;
static struct uvm_object	mb_object_store;

static struct uvm_pagerops km_pager = {
	NULL,		/* init */
	NULL,		/* reference */
	NULL,		/* detach */
	NULL,		/* fault */
	NULL,		/* flush */
	uvm_km_get,	/* get */
	/* ... rest are NULL */
};

/*
 * uvm_km_get: pager get function for kernel objects
 *
 * => currently we do not support pageout to the swap area, so this
 *    pager is very simple.   eventually we may want an anonymous
 *    object pager which will do paging.
 * => XXXCDC: this pager should be phased out in favor of the aobj pager
 */

static int
uvm_km_get(uobj, offset, pps, npagesp, centeridx, access_type, advice, flags)
	struct uvm_object *uobj;
	vaddr_t offset;
	struct vm_page **pps;
	int *npagesp;
	int centeridx, advice, flags;
	vm_prot_t access_type;
{
	vaddr_t current_offset;
	vm_page_t ptmp;
	int lcv, gotpages, maxpages;
	boolean_t done;
	UVMHIST_FUNC("uvm_km_get"); UVMHIST_CALLED(maphist);

	UVMHIST_LOG(maphist, "flags=%d", flags,0,0,0);

	/*
	 * get number of pages
	 */

	maxpages = *npagesp;

	/*
	 * step 1: handle the case where fault data structures are locked.
	 */

	if (flags & PGO_LOCKED) {

		/*
		 * step 1a: get pages that are already resident.   only do
		 * this if the data structures are locked (i.e. the first time
		 * through).
		 */

		done = TRUE;	/* be optimistic */
		gotpages = 0;	/* # of pages we got so far */

		for (lcv = 0, current_offset = offset ;
		    lcv < maxpages ; lcv++, current_offset += PAGE_SIZE) {

			/* do we care about this page?  if not, skip it */
			if (pps[lcv] == PGO_DONTCARE)
				continue;

			/* lookup page */
			ptmp = uvm_pagelookup(uobj, current_offset);

			/* null?  attempt to allocate the page */
			if (ptmp == NULL) {
				ptmp = uvm_pagealloc(uobj, current_offset,
				    NULL);
				if (ptmp) {
					/* new page */
					ptmp->flags &= ~(PG_BUSY|PG_FAKE);
					UVM_PAGE_OWN(ptmp, NULL);
					uvm_pagezero(ptmp);
				}
			}

			/*
			 * to be useful must get a non-busy, non-released page
			 */
			if (ptmp == NULL ||
			    (ptmp->flags & (PG_BUSY|PG_RELEASED)) != 0) {
				if (lcv == centeridx ||
				    (flags & PGO_ALLPAGES) != 0)
					/* need to do a wait or I/O! */
					done = FALSE;
				continue;
			}

			/*
			 * useful page: busy/lock it and plug it in our
			 * result array
			 */

			/* caller must un-busy this page */
			ptmp->flags |= PG_BUSY;
			UVM_PAGE_OWN(ptmp, "uvm_km_get1");
			pps[lcv] = ptmp;
			gotpages++;

		}	/* "for" lcv loop */

		/*
		 * step 1b: now we've either done everything needed or we
		 * need to unlock and do some waiting or I/O.
		 */

		UVMHIST_LOG(maphist, "<- done (done=%d)", done, 0,0,0);

		*npagesp = gotpages;
		if (done)
			return(VM_PAGER_OK);		/* bingo! */
		else
			return(VM_PAGER_UNLOCK);	/* EEK!   Need to
							 * unlock and I/O */
	}

	/*
	 * step 2: get non-resident or busy pages.
	 * object is locked.   data structures are unlocked.
	 */

	for (lcv = 0, current_offset = offset ;
	    lcv < maxpages ; lcv++, current_offset += PAGE_SIZE) {

		/* skip over pages we've already gotten or don't want */
		/* skip over pages we don't _have_ to get */
		if (pps[lcv] != NULL ||
		    (lcv != centeridx && (flags & PGO_ALLPAGES) == 0))
			continue;

		/*
		 * we have yet to locate the current page (pps[lcv]).   we
		 * first look for a page that is already at the current offset.
		 * if we find a page, we check to see if it is busy or
		 * released.   if that is the case, then we sleep on the page
		 * until it is no longer busy or released and repeat the
		 * lookup.   if the page we found is neither busy nor
		 * released, then we busy it (so we own it) and plug it into
		 * pps[lcv].   this 'break's the following while loop and
		 * indicates we are ready to move on to the next page in the
		 * "lcv" loop above.
		 *
		 * if we exit the while loop with pps[lcv] still set to NULL,
		 * then it means that we allocated a new busy/fake/clean page
		 * ptmp in the object and we need to do I/O to fill in the
		 * data.
		 */

		while (pps[lcv] == NULL) {	/* top of "pps" while loop */

			/* look for a current page */
			ptmp = uvm_pagelookup(uobj, current_offset);

			/* nope?  allocate one now (if we can) */
			if (ptmp == NULL) {

				ptmp = uvm_pagealloc(uobj, current_offset,
				    NULL);	/* alloc */

				/* out of RAM? */
				if (ptmp == NULL) {
					simple_unlock(&uobj->vmobjlock);
					uvm_wait("kmgetwait1");
					simple_lock(&uobj->vmobjlock);
					/* goto top of pps while loop */
					continue;
				}

				/*
				 * got new page ready for I/O.   break pps
				 * while loop.   pps[lcv] is still NULL.
				 */
				break;
			}

			/* page is there, see if we need to wait on it */
			if ((ptmp->flags & (PG_BUSY|PG_RELEASED)) != 0) {
				ptmp->flags |= PG_WANTED;
				UVM_UNLOCK_AND_WAIT(ptmp, &uobj->vmobjlock, 0,
				    "uvn_get", 0);
				simple_lock(&uobj->vmobjlock);
				continue;	/* goto top of pps while loop */
			}

			/*
			 * if we get here then the page has become resident
			 * and unbusy between steps 1 and 2.   we busy it now
			 * (so we own it) and set pps[lcv] (so that we exit
			 * the while loop).   caller must un-busy.
			 */
			ptmp->flags |= PG_BUSY;
			UVM_PAGE_OWN(ptmp, "uvm_km_get2");
			pps[lcv] = ptmp;
		}

		/*
		 * if we own a valid page at the correct offset, pps[lcv]
		 * will point to it.   nothing more to do except go to the
		 * next page.
		 */

		if (pps[lcv])
			continue;	/* next lcv */

		/*
		 * we have a "fake/busy/clean" page that we just allocated.
		 * do the needed "i/o" (in this case that means zero it).
		 */
		uvm_pagezero(ptmp);
		ptmp->flags &= ~(PG_FAKE);
		pps[lcv] = ptmp;

	}	/* lcv loop */

	/*
	 * finally, unlock object and return.
	 */

	simple_unlock(&uobj->vmobjlock);
	UVMHIST_LOG(maphist, "<- done (OK)",0,0,0,0);
	return(VM_PAGER_OK);
}

/*
 * uvm_km_init: init kernel maps and objects to reflect reality (i.e.
 * KVM already allocated for text, data, bss, and static data structures).
 *
 * => KVM is defined by VM_MIN_KERNEL_ADDRESS/VM_MAX_KERNEL_ADDRESS.
 *    we assume that [min -> start] has already been allocated and that
 *    "end" is the end.
 */

void
uvm_km_init(start, end)
	vaddr_t start, end;
{
	vaddr_t base = VM_MIN_KERNEL_ADDRESS;

	/*
	 * first, init kernel memory objects.
	 */

	/* kernel_object: for pageable anonymous kernel memory */
	uvm.kernel_object = uao_create(VM_MAX_KERNEL_ADDRESS -
	    VM_MIN_KERNEL_ADDRESS, UAO_FLAG_KERNOBJ);

	/* kmem_object: for malloc'd memory (wired, protected by splimp) */
	simple_lock_init(&kmem_object_store.vmobjlock);
	kmem_object_store.pgops = &km_pager;
	TAILQ_INIT(&kmem_object_store.memq);
	kmem_object_store.uo_npages = 0;
	/* we are special.  we never die */
	kmem_object_store.uo_refs = UVM_OBJ_KERN;
	uvmexp.kmem_object = &kmem_object_store;

	/* mb_object: for mbuf memory (always wired, protected by splimp) */
	simple_lock_init(&mb_object_store.vmobjlock);
	mb_object_store.pgops = &km_pager;
	TAILQ_INIT(&mb_object_store.memq);
	mb_object_store.uo_npages = 0;
	/* we are special.  we never die */
	mb_object_store.uo_refs = UVM_OBJ_KERN;
	uvmexp.mb_object = &mb_object_store;

	/*
	 * init the map and reserve already allocated kernel space
	 * before installing.
	 */

	uvm_map_setup(&kernel_map_store, base, end, FALSE);
	kernel_map_store.pmap = pmap_kernel();
	if (uvm_map(&kernel_map_store, &base, start - base, NULL,
	    UVM_UNKNOWN_OFFSET, UVM_MAPFLAG(UVM_PROT_ALL, UVM_PROT_ALL,
	    UVM_INH_NONE, UVM_ADV_RANDOM, UVM_FLAG_FIXED)) != KERN_SUCCESS)
		panic("uvm_km_init: could not reserve space for kernel");

	/*
	 * install!
	 */

	kernel_map = &kernel_map_store;
}
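/*
 * a minimal sketch (not compiled) of how machine-dependent startup code
 * might install the kernel map; "kvm_start" and "kvm_end" are placeholder
 * names for the first free and last usable kernel virtual addresses, not
 * identifiers from any real port.
 */
#if 0
	vaddr_t kvm_start, kvm_end;	/* determined by the port's pmap */

	/* [VM_MIN_KERNEL_ADDRESS -> kvm_start] is already in use */
	uvm_km_init(kvm_start, kvm_end);
	/* kernel_map is now valid; later allocations may use it */
#endif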
/*
 * uvm_km_suballoc: allocate a submap in the kernel map.   once a submap
 * is allocated all references to that area of VM must go through it.  this
 * allows the locking of VAs in kernel_map to be broken up into regions.
 *
 * => if `fixed' is true, *min specifies where the region described
 *    by the submap must start
 * => if submap is non NULL we use that as the submap, otherwise we
 *    alloc a new map
 */
struct vm_map *
uvm_km_suballoc(map, min, max, size, pageable, fixed, submap)
	struct vm_map *map;
	vaddr_t *min, *max;		/* OUT, OUT */
	vsize_t size;
	boolean_t pageable;
	boolean_t fixed;
	struct vm_map *submap;
{
	int mapflags = UVM_FLAG_NOMERGE | (fixed ? UVM_FLAG_FIXED : 0);

	size = round_page(size);	/* round up to pagesize */

	/*
	 * first allocate a blank spot in the parent map
	 */

	if (uvm_map(map, min, size, NULL, UVM_UNKNOWN_OFFSET,
	    UVM_MAPFLAG(UVM_PROT_ALL, UVM_PROT_ALL, UVM_INH_NONE,
	    UVM_ADV_RANDOM, mapflags)) != KERN_SUCCESS) {
		panic("uvm_km_suballoc: unable to allocate space in parent map");
	}

	/*
	 * set VM bounds (min is filled in by uvm_map)
	 */

	*max = *min + size;

	/*
	 * add references to pmap and create or init the submap
	 */

	pmap_reference(vm_map_pmap(map));
	if (submap == NULL) {
		submap = uvm_map_create(vm_map_pmap(map), *min, *max, pageable);
		if (submap == NULL)
			panic("uvm_km_suballoc: unable to create submap");
	} else {
		uvm_map_setup(submap, *min, *max, pageable);
		submap->pmap = vm_map_pmap(map);
	}

	/*
	 * now let uvm_map_submap plug it in...
	 */

	if (uvm_map_submap(map, *min, *max, submap) != KERN_SUCCESS)
		panic("uvm_km_suballoc: submap allocation failed");

	return(submap);
}
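/*
 * a minimal sketch (not compiled) of boot-time submap creation in the
 * style of kmem_map; "my_map", "my_min", "my_max" and the size are
 * placeholders for illustration only.
 */
#if 0
	vaddr_t my_min, my_max;
	struct vm_map *my_map;

	/* carve a wired (non-pageable) submap out of kernel_map */
	my_map = uvm_km_suballoc(kernel_map, &my_min, &my_max,
	    64 * PAGE_SIZE, FALSE, FALSE, NULL);
	/* my_min/my_max now bound the submap's range within kernel_map */
#endif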
/*
 * uvm_km_pgremove: remove pages from a kernel uvm_object.
 *
 * => when you unmap a part of anonymous kernel memory you want to toss
 *    the pages right away.   (this gets called from uvm_unmap_...).
 */

#define UKM_HASH_PENALTY 4	/* a guess */

void
uvm_km_pgremove(uobj, start, end)
	struct uvm_object *uobj;
	vaddr_t start, end;
{
	boolean_t by_list, is_aobj;
	struct vm_page *pp, *ppnext;
	vaddr_t curoff;
	UVMHIST_FUNC("uvm_km_pgremove"); UVMHIST_CALLED(maphist);

	simple_lock(&uobj->vmobjlock);		/* lock object */

	/* is uobj an aobj? */
	is_aobj = uobj->pgops == &aobj_pager;

	/* choose cheapest traversal */
	by_list = (uobj->uo_npages <=
	    ((end - start) >> PAGE_SHIFT) * UKM_HASH_PENALTY);

	if (by_list)
		goto loop_by_list;

	/* by hash */

	for (curoff = start ; curoff < end ; curoff += PAGE_SIZE) {
		pp = uvm_pagelookup(uobj, curoff);
		if (pp == NULL)
			continue;

		UVMHIST_LOG(maphist,"  page 0x%x, busy=%d", pp,
		    pp->flags & PG_BUSY, 0, 0);
		/* now do the actual work */
		if (pp->flags & PG_BUSY)
			/* owner must check for this when done */
			pp->flags |= PG_RELEASED;
		else {
			pmap_page_protect(PMAP_PGARG(pp), VM_PROT_NONE);

			/*
			 * if this kernel object is an aobj, free the swap slot.
			 */
			if (is_aobj) {
				int slot = uao_set_swslot(uobj,
				    curoff >> PAGE_SHIFT, 0);

				if (slot)
					uvm_swap_free(slot, 1);
			}

			uvm_lock_pageq();
			uvm_pagefree(pp);
			uvm_unlock_pageq();
		}
		/* done */

	}
	simple_unlock(&uobj->vmobjlock);
	return;

loop_by_list:

	for (pp = uobj->memq.tqh_first ; pp != NULL ; pp = ppnext) {

		ppnext = pp->listq.tqe_next;
		if (pp->offset < start || pp->offset >= end) {
			continue;
		}

		UVMHIST_LOG(maphist,"  page 0x%x, busy=%d", pp,
		    pp->flags & PG_BUSY, 0, 0);
		/* now do the actual work */
		if (pp->flags & PG_BUSY)
			/* owner must check for this when done */
			pp->flags |= PG_RELEASED;
		else {
			pmap_page_protect(PMAP_PGARG(pp), VM_PROT_NONE);

			/*
			 * if this kernel object is an aobj, free the swap slot.
			 */
			if (is_aobj) {
				int slot = uao_set_swslot(uobj,
				    pp->offset >> PAGE_SHIFT, 0);

				if (slot)
					uvm_swap_free(slot, 1);
			}

			uvm_lock_pageq();
			uvm_pagefree(pp);
			uvm_unlock_pageq();
		}
		/* done */

	}
	simple_unlock(&uobj->vmobjlock);
	return;
}


/*
 * uvm_km_kmemalloc: lower level kernel memory allocator for malloc()
 *
 * => we map wired memory into the specified map using the obj passed in
 * => NOTE: we can return NULL even if we can wait if there is not enough
 *	free VM space in the map... caller should be prepared to handle
 *	this case.
 * => we return KVA of memory allocated
 * => flags: NOWAIT, VALLOC - just allocate VA, TRYLOCK - fail if we can't
 *	lock the map
 */

vaddr_t
uvm_km_kmemalloc(map, obj, size, flags)
	vm_map_t map;
	struct uvm_object *obj;
	vsize_t size;
	int flags;
{
	vaddr_t kva, loopva;
	vaddr_t offset;
	struct vm_page *pg;
	UVMHIST_FUNC("uvm_km_kmemalloc"); UVMHIST_CALLED(maphist);


	UVMHIST_LOG(maphist,"  (map=0x%x, obj=0x%x, size=0x%x, flags=%d)",
	    map, obj, size, flags);
#ifdef DIAGNOSTIC
	/* sanity check */
	if (vm_map_pmap(map) != pmap_kernel())
		panic("uvm_km_kmemalloc: invalid map");
#endif

	/*
	 * setup for call
	 */

	size = round_page(size);
	kva = vm_map_min(map);	/* hint */

	/*
	 * allocate some virtual space
	 */

	if (uvm_map(map, &kva, size, obj, UVM_UNKNOWN_OFFSET,
	    UVM_MAPFLAG(UVM_PROT_ALL, UVM_PROT_ALL, UVM_INH_NONE,
	    UVM_ADV_RANDOM, (flags & UVM_KMF_TRYLOCK)))
	    != KERN_SUCCESS) {
		UVMHIST_LOG(maphist, "<- done (no VM)",0,0,0,0);
		return(0);
	}

	/*
	 * if all we wanted was VA, return now
	 */

	if (flags & UVM_KMF_VALLOC) {
		UVMHIST_LOG(maphist,"<- done valloc (kva=0x%x)", kva,0,0,0);
		return(kva);
	}

	/*
	 * recover object offset from virtual address
	 */

	offset = kva - vm_map_min(kernel_map);
	UVMHIST_LOG(maphist, "  kva=0x%x, offset=0x%x", kva, offset,0,0);

	/*
	 * now allocate and map in the memory... note that we are the only ones
	 * who should ever get a handle on this area of VM.
	 */

	loopva = kva;
	while (size) {
		simple_lock(&obj->vmobjlock);
		pg = uvm_pagealloc(obj, offset, NULL);
		if (pg) {
			pg->flags &= ~PG_BUSY;	/* new page */
			UVM_PAGE_OWN(pg, NULL);
		}
		simple_unlock(&obj->vmobjlock);

		/*
		 * out of memory?
		 */

		if (pg == NULL) {
			if (flags & UVM_KMF_NOWAIT) {
				/* free everything! */
				uvm_unmap(map, kva, kva + size);
				return(0);
			} else {
				uvm_wait("km_getwait2");	/* sleep here */
				continue;
			}
		}

		/*
		 * map it in: note that we call pmap_enter with the map and
		 * object unlocked in case we are kmem_map/kmem_object
		 * (because if pmap_enter wants to allocate out of kmem_object
		 * it will need to lock it itself!)
		 */
743 */ 744#if defined(PMAP_NEW) 745 pmap_kenter_pa(loopva, VM_PAGE_TO_PHYS(pg), VM_PROT_ALL); 746#else 747 pmap_enter(map->pmap, loopva, VM_PAGE_TO_PHYS(pg), 748 UVM_PROT_ALL, TRUE); 749#endif 750 loopva += PAGE_SIZE; 751 offset += PAGE_SIZE; 752 size -= PAGE_SIZE; 753 } 754 755 UVMHIST_LOG(maphist,"<- done (kva=0x%x)", kva,0,0,0); 756 return(kva); 757} 758 759/* 760 * uvm_km_free: free an area of kernel memory 761 */ 762 763void 764uvm_km_free(map, addr, size) 765 vm_map_t map; 766 vaddr_t addr; 767 vsize_t size; 768{ 769 770 uvm_unmap(map, trunc_page(addr), round_page(addr+size)); 771} 772 773/* 774 * uvm_km_free_wakeup: free an area of kernel memory and wake up 775 * anyone waiting for vm space. 776 * 777 * => XXX: "wanted" bit + unlock&wait on other end? 778 */ 779 780void 781uvm_km_free_wakeup(map, addr, size) 782 vm_map_t map; 783 vaddr_t addr; 784 vsize_t size; 785{ 786 vm_map_entry_t dead_entries; 787 788 vm_map_lock(map); 789 (void)uvm_unmap_remove(map, trunc_page(addr), round_page(addr+size), 790 &dead_entries); 791 thread_wakeup(map); 792 vm_map_unlock(map); 793 794 if (dead_entries != NULL) 795 uvm_unmap_detach(dead_entries, 0); 796} 797 798/* 799 * uvm_km_alloc1: allocate wired down memory in the kernel map. 800 * 801 * => we can sleep if needed 802 */ 803 804vaddr_t 805uvm_km_alloc1(map, size, zeroit) 806 vm_map_t map; 807 vsize_t size; 808 boolean_t zeroit; 809{ 810 vaddr_t kva, loopva, offset; 811 struct vm_page *pg; 812 UVMHIST_FUNC("uvm_km_alloc1"); UVMHIST_CALLED(maphist); 813 814 UVMHIST_LOG(maphist,"(map=0x%x, size=0x%x)", map, size,0,0); 815 816#ifdef DIAGNOSTIC 817 if (vm_map_pmap(map) != pmap_kernel()) 818 panic("uvm_km_alloc1"); 819#endif 820 821 size = round_page(size); 822 kva = vm_map_min(map); /* hint */ 823 824 /* 825 * allocate some virtual space 826 */ 827 828 if (uvm_map(map, &kva, size, uvm.kernel_object, UVM_UNKNOWN_OFFSET, 829 UVM_MAPFLAG(UVM_PROT_ALL, UVM_PROT_ALL, UVM_INH_NONE, 830 UVM_ADV_RANDOM, 0)) != KERN_SUCCESS) { 831 UVMHIST_LOG(maphist,"<- done (no VM)",0,0,0,0); 832 return(0); 833 } 834 835 /* 836 * recover object offset from virtual address 837 */ 838 839 offset = kva - vm_map_min(kernel_map); 840 UVMHIST_LOG(maphist," kva=0x%x, offset=0x%x", kva, offset,0,0); 841 842 /* 843 * now allocate the memory. we must be careful about released pages. 
/*
 * uvm_km_alloc1: allocate wired down memory in the kernel map.
 *
 * => we can sleep if needed
 */

vaddr_t
uvm_km_alloc1(map, size, zeroit)
	vm_map_t map;
	vsize_t size;
	boolean_t zeroit;
{
	vaddr_t kva, loopva, offset;
	struct vm_page *pg;
	UVMHIST_FUNC("uvm_km_alloc1"); UVMHIST_CALLED(maphist);

	UVMHIST_LOG(maphist,"(map=0x%x, size=0x%x)", map, size,0,0);

#ifdef DIAGNOSTIC
	if (vm_map_pmap(map) != pmap_kernel())
		panic("uvm_km_alloc1");
#endif

	size = round_page(size);
	kva = vm_map_min(map);		/* hint */

	/*
	 * allocate some virtual space
	 */

	if (uvm_map(map, &kva, size, uvm.kernel_object, UVM_UNKNOWN_OFFSET,
	    UVM_MAPFLAG(UVM_PROT_ALL, UVM_PROT_ALL, UVM_INH_NONE,
	    UVM_ADV_RANDOM, 0)) != KERN_SUCCESS) {
		UVMHIST_LOG(maphist,"<- done (no VM)",0,0,0,0);
		return(0);
	}

	/*
	 * recover object offset from virtual address
	 */

	offset = kva - vm_map_min(kernel_map);
	UVMHIST_LOG(maphist,"  kva=0x%x, offset=0x%x", kva, offset,0,0);

	/*
	 * now allocate the memory.  we must be careful about released pages.
	 */

	loopva = kva;
	while (size) {
		simple_lock(&uvm.kernel_object->vmobjlock);
		pg = uvm_pagelookup(uvm.kernel_object, offset);

		/*
		 * if we found a page in an unallocated region, it must be
		 * released
		 */
		if (pg) {
			if ((pg->flags & PG_RELEASED) == 0)
				panic("uvm_km_alloc1: non-released page");
			pg->flags |= PG_WANTED;
			UVM_UNLOCK_AND_WAIT(pg, &uvm.kernel_object->vmobjlock,
			    0, "km_alloc", 0);
			continue;	/* retry */
		}

		/* allocate ram */
		pg = uvm_pagealloc(uvm.kernel_object, offset, NULL);
		if (pg) {
			pg->flags &= ~PG_BUSY;	/* new page */
			UVM_PAGE_OWN(pg, NULL);
		}
		simple_unlock(&uvm.kernel_object->vmobjlock);
		if (pg == NULL) {
			uvm_wait("km_alloc1w");	/* wait for memory */
			continue;
		}

		/* map it in */
#if defined(PMAP_NEW)
		pmap_kenter_pa(loopva, VM_PAGE_TO_PHYS(pg), UVM_PROT_ALL);
#else
		pmap_enter(map->pmap, loopva, VM_PAGE_TO_PHYS(pg),
		    UVM_PROT_ALL, TRUE);
#endif
		loopva += PAGE_SIZE;
		offset += PAGE_SIZE;
		size -= PAGE_SIZE;
	}

	/*
	 * zero on request (note that "size" is now zero due to the above loop
	 * so we need to subtract kva from loopva to reconstruct the size).
	 */

	if (zeroit)
		memset((caddr_t)kva, 0, loopva - kva);

	UVMHIST_LOG(maphist,"<- done (kva=0x%x)", kva,0,0,0);
	return(kva);
}

/*
 * uvm_km_valloc: allocate zero-fill memory in the kernel's address space
 *
 * => memory is not allocated until fault time
 */

vaddr_t
uvm_km_valloc(map, size)
	vm_map_t map;
	vsize_t size;
{
	vaddr_t kva;
	UVMHIST_FUNC("uvm_km_valloc"); UVMHIST_CALLED(maphist);

	UVMHIST_LOG(maphist, "(map=0x%x, size=0x%x)", map, size, 0,0);

#ifdef DIAGNOSTIC
	if (vm_map_pmap(map) != pmap_kernel())
		panic("uvm_km_valloc");
#endif

	size = round_page(size);
	kva = vm_map_min(map);		/* hint */

	/*
	 * allocate some virtual space.  will be demand filled by kernel_object.
	 */

	if (uvm_map(map, &kva, size, uvm.kernel_object, UVM_UNKNOWN_OFFSET,
	    UVM_MAPFLAG(UVM_PROT_ALL, UVM_PROT_ALL, UVM_INH_NONE,
	    UVM_ADV_RANDOM, 0)) != KERN_SUCCESS) {
		UVMHIST_LOG(maphist, "<- done (no VM)", 0,0,0,0);
		return(0);
	}

	UVMHIST_LOG(maphist, "<- done (kva=0x%x)", kva,0,0,0);
	return(kva);
}
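/*
 * a minimal sketch (not compiled) contrasting the two allocators above:
 * uvm_km_alloc1 returns memory that is wired immediately, while
 * uvm_km_valloc returns address space that is zero-filled at fault time.
 */
#if 0
	vaddr_t wired, lazy;

	wired = uvm_km_alloc1(kernel_map, PAGE_SIZE, TRUE); /* zeroed, wired */
	lazy = uvm_km_valloc(kernel_map, PAGE_SIZE);	    /* no RAM yet */
	/* touching "lazy" faults in zero-fill pages from kernel_object */
#endif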
/*
 * uvm_km_valloc_wait: allocate zero-fill memory in the kernel's address space
 *
 * => memory is not allocated until fault time
 * => if no room in map, wait for space to free, unless requested size
 *    is larger than map (in which case we return 0)
 */

vaddr_t
uvm_km_valloc_wait(map, size)
	vm_map_t map;
	vsize_t size;
{
	vaddr_t kva;
	UVMHIST_FUNC("uvm_km_valloc_wait"); UVMHIST_CALLED(maphist);

	UVMHIST_LOG(maphist, "(map=0x%x, size=0x%x)", map, size, 0,0);

#ifdef DIAGNOSTIC
	if (vm_map_pmap(map) != pmap_kernel())
		panic("uvm_km_valloc_wait");
#endif

	size = round_page(size);
	if (size > vm_map_max(map) - vm_map_min(map))
		return(0);

	while (1) {
		kva = vm_map_min(map);		/* hint */

		/*
		 * allocate some virtual space.   will be demand filled
		 * by kernel_object.
		 */

		if (uvm_map(map, &kva, size, uvm.kernel_object,
		    UVM_UNKNOWN_OFFSET, UVM_MAPFLAG(UVM_PROT_ALL,
		    UVM_PROT_ALL, UVM_INH_NONE, UVM_ADV_RANDOM, 0))
		    == KERN_SUCCESS) {
			UVMHIST_LOG(maphist,"<- done (kva=0x%x)", kva,0,0,0);
			return(kva);
		}

		/*
		 * failed.  sleep for a while (on map)
		 */

		UVMHIST_LOG(maphist,"<<<sleeping>>>",0,0,0,0);
		tsleep((caddr_t)map, PVM, "vallocwait", 0);
	}
	/*NOTREACHED*/
}

/* Sanity; must specify both or none. */
#if (defined(PMAP_MAP_POOLPAGE) || defined(PMAP_UNMAP_POOLPAGE)) && \
    (!defined(PMAP_MAP_POOLPAGE) || !defined(PMAP_UNMAP_POOLPAGE))
#error Must specify MAP and UNMAP together.
#endif

/*
 * uvm_km_alloc_poolpage: allocate a page for the pool allocator
 *
 * => if the pmap specifies an alternate mapping method, we use it.
 */

/* ARGSUSED */
vaddr_t
uvm_km_alloc_poolpage1(map, obj, waitok)
	vm_map_t map;
	struct uvm_object *obj;
	boolean_t waitok;
{
#if defined(PMAP_MAP_POOLPAGE)
	struct vm_page *pg;
	vaddr_t va;

 again:
	pg = uvm_pagealloc(NULL, 0, NULL);
	if (pg == NULL) {
		if (waitok) {
			uvm_wait("plpg");
			goto again;
		} else
			return (0);
	}
	va = PMAP_MAP_POOLPAGE(VM_PAGE_TO_PHYS(pg));
	if (va == 0)
		uvm_pagefree(pg);
	return (va);
#else
	vaddr_t va;
	int s;

	/*
	 * NOTE: We may be called with a map that doesn't require splimp
	 * protection (e.g. kernel_map).  However, it does not hurt to
	 * go to splimp in this case (since unprotected maps will never be
	 * accessed in interrupt context).
	 *
	 * XXX We may want to consider changing the interface to this
	 * XXX function.
	 */

	s = splimp();
	va = uvm_km_kmemalloc(map, obj, PAGE_SIZE, waitok ? 0 : UVM_KMF_NOWAIT);
	splx(s);
	return (va);
#endif /* PMAP_MAP_POOLPAGE */
}

/*
 * uvm_km_free_poolpage: free a previously allocated pool page
 *
 * => if the pmap specifies an alternate unmapping method, we use it.
 */

/* ARGSUSED */
void
uvm_km_free_poolpage1(map, addr)
	vm_map_t map;
	vaddr_t addr;
{
#if defined(PMAP_UNMAP_POOLPAGE)
	paddr_t pa;

	pa = PMAP_UNMAP_POOLPAGE(addr);
	uvm_pagefree(PHYS_TO_VM_PAGE(pa));
#else
	int s;

	/*
	 * NOTE: We may be called with a map that doesn't require splimp
	 * protection (e.g. kernel_map).  However, it does not hurt to
	 * go to splimp in this case (since unprotected maps will never be
	 * accessed in interrupt context).
	 *
	 * XXX We may want to consider changing the interface to this
	 * XXX function.
	 */

	s = splimp();
	uvm_km_free(map, addr, PAGE_SIZE);
	splx(s);
#endif /* PMAP_UNMAP_POOLPAGE */
}
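/*
 * a minimal sketch (not compiled) of the pool-page pair above; whether the
 * PMAP_MAP_POOLPAGE fast path or the kmemalloc path is taken is decided
 * entirely inside the two functions.
 */
#if 0
	vaddr_t va;

	va = uvm_km_alloc_poolpage1(kmem_map, uvmexp.kmem_object, TRUE);
	if (va) {
		/* ... hand the page to the pool allocator ... */
		uvm_km_free_poolpage1(kmem_map, va);
	}
#endif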