/*	$NetBSD: uvm_page.c,v 1.23 1999/05/25 01:34:13 thorpej Exp $	*/

/*
 * Copyright (c) 1997 Charles D. Cranor and Washington University.
 * Copyright (c) 1991, 1993, The Regents of the University of California.
 *
 * All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * The Mach Operating System project at Carnegie-Mellon University.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by Charles D. Cranor,
 *	Washington University, the University of California, Berkeley and
 *	its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)vm_page.c	8.3 (Berkeley) 3/21/94
 * from: Id: uvm_page.c,v 1.1.2.18 1998/02/06 05:24:42 chs Exp
 *
 *
 * Copyright (c) 1987, 1990 Carnegie-Mellon University.
 * All rights reserved.
 *
 * Permission to use, copy, modify and distribute this software and
 * its documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND
 * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie the
 * rights to redistribute these changes.
 */

/*
 * uvm_page.c: page ops.
 */
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/malloc.h>
#include <sys/proc.h>

#include <vm/vm.h>
#include <vm/vm_page.h>
#include <vm/vm_kern.h>

#define UVM_PAGE		/* pull in uvm_page.h functions */
#include <uvm/uvm.h>

/*
 * global vars... XXXCDC: move to uvm. structure.
 */

/*
 * physical memory config is stored in vm_physmem.
 */

struct vm_physseg vm_physmem[VM_PHYSSEG_MAX];	/* XXXCDC: uvm.physmem */
int vm_nphysseg = 0;				/* XXXCDC: uvm.nphysseg */

/*
 * local variables
 */

/*
 * these variables record the values returned by vm_page_bootstrap,
 * for debugging purposes.  The implementation of uvm_pageboot_alloc
 * and pmap_startup here also uses them internally.
 */

static vaddr_t virtual_space_start;
static vaddr_t virtual_space_end;

/*
 * we use a hash table with only one bucket during bootup.  we will
 * later rehash (resize) the hash table once malloc() is ready.
 * we statically allocate the bootstrap bucket below...
 */

static struct pglist uvm_bootbucket;

/*
 * local prototypes
 */

static void uvm_pageinsert __P((struct vm_page *));


/*
 * inline functions
 */

/*
 * uvm_pageinsert: insert a page in the object and the hash table
 *
 * => caller must lock object
 * => caller must lock page queues
 * => caller should have already set pg's object and offset pointers
 *    and bumped the version counter
 */

__inline static void
uvm_pageinsert(pg)
	struct vm_page *pg;
{
	struct pglist *buck;
	int s;

#ifdef DIAGNOSTIC
	if (pg->flags & PG_TABLED)
		panic("uvm_pageinsert: already inserted");
#endif

	buck = &uvm.page_hash[uvm_pagehash(pg->uobject,pg->offset)];
	s = splimp();
	simple_lock(&uvm.hashlock);
	TAILQ_INSERT_TAIL(buck, pg, hashq);	/* put in hash */
	simple_unlock(&uvm.hashlock);
	splx(s);

	TAILQ_INSERT_TAIL(&pg->uobject->memq, pg, listq); /* put in object */
	pg->flags |= PG_TABLED;
	pg->uobject->uo_npages++;

}

/*
 * uvm_pageremove: remove page from object and hash
 *
 * => caller must lock object
 * => caller must lock page queues
 */

void __inline
uvm_pageremove(pg)
	struct vm_page *pg;
{
	struct pglist *buck;
	int s;

#ifdef DIAGNOSTIC
	if ((pg->flags & (PG_FAULTING)) != 0)
		panic("uvm_pageremove: page is faulting");
#endif

	if ((pg->flags & PG_TABLED) == 0)
		return;				/* XXX: log */

	buck = &uvm.page_hash[uvm_pagehash(pg->uobject,pg->offset)];
	s = splimp();
	simple_lock(&uvm.hashlock);
	TAILQ_REMOVE(buck, pg, hashq);
	simple_unlock(&uvm.hashlock);
	splx(s);

	/* object should be locked */
	TAILQ_REMOVE(&pg->uobject->memq, pg, listq);

	pg->flags &= ~PG_TABLED;
	pg->uobject->uo_npages--;
	pg->uobject = NULL;
	pg->version++;

}
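/*
 * to make the bucket discipline above concrete, a lookup walks the
 * same chain (illustrative sketch only; uvm_pagehash() comes from
 * uvm_page.h, and the module's real lookup routine is the
 * uvm_pagelookup() inline declared with the other page functions):
 *
 *	buck = &uvm.page_hash[uvm_pagehash(uobj, off)];
 *	for (pg = buck->tqh_first; pg != NULL; pg = pg->hashq.tqe_next)
 *		if (pg->uobject == uobj && pg->offset == off)
 *			return(pg);
 *	return(NULL);
 */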
/*
 * uvm_page_init: init the page system.  called from uvm_init().
 *
 * => we return the range of kernel virtual memory in kvm_startp/kvm_endp
 */

void
uvm_page_init(kvm_startp, kvm_endp)
	vaddr_t *kvm_startp, *kvm_endp;
{
	int freepages, pagecount;
	vm_page_t pagearray;
	int lcv, n, i;
	paddr_t paddr;


	/*
	 * step 1: init the page queues and page queue locks
	 */
	for (lcv = 0; lcv < VM_NFREELIST; lcv++)
		TAILQ_INIT(&uvm.page_free[lcv]);
	TAILQ_INIT(&uvm.page_active);
	TAILQ_INIT(&uvm.page_inactive_swp);
	TAILQ_INIT(&uvm.page_inactive_obj);
	simple_lock_init(&uvm.pageqlock);
	simple_lock_init(&uvm.fpageqlock);

	/*
	 * step 2: init the <obj,offset> => <page> hash table.  for now
	 * we just have one bucket (the bootstrap bucket).  later on we
	 * will malloc() new buckets as we dynamically resize the hash table.
	 */

	uvm.page_nhash = 1;			/* 1 bucket */
	uvm.page_hashmask = 0;			/* mask for hash function */
	uvm.page_hash = &uvm_bootbucket;	/* install bootstrap bucket */
	TAILQ_INIT(uvm.page_hash);		/* init hash table */
	simple_lock_init(&uvm.hashlock);	/* init hash table lock */

	/*
	 * step 3: allocate vm_page structures.
	 */

	/*
	 * sanity check:
	 * before calling this function the MD code is expected to register
	 * some free RAM with the uvm_page_physload() function.  our job
	 * now is to allocate vm_page structures for this memory.
	 */

	if (vm_nphysseg == 0)
		panic("vm_page_bootstrap: no memory pre-allocated");

	/*
	 * first calculate the number of free pages...
	 *
	 * note that we use start/end rather than avail_start/avail_end.
	 * this allows us to allocate extra vm_page structures in case we
	 * want to return some memory to the pool after booting.
	 */

	freepages = 0;
	for (lcv = 0 ; lcv < vm_nphysseg ; lcv++)
		freepages += (vm_physmem[lcv].end - vm_physmem[lcv].start);

	/*
	 * we now know we have (PAGE_SIZE * freepages) bytes of memory we can
	 * use.  for each page of memory we use we need a vm_page structure.
	 * thus, the total number of pages we can use is the total size of
	 * the memory divided by (PAGE_SIZE plus the size of a vm_page
	 * structure).  we add one to freepages as a fudge factor to avoid
	 * truncation errors (since we can only allocate in terms of whole
	 * pages).
	 */

	pagecount = ((freepages + 1) << PAGE_SHIFT) /
	    (PAGE_SIZE + sizeof(struct vm_page));
	pagearray = (vm_page_t)uvm_pageboot_alloc(pagecount *
	    sizeof(struct vm_page));
	bzero(pagearray, pagecount * sizeof(struct vm_page));
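	/*
	 * a worked instance of the sizing above, with made-up numbers:
	 * assuming 4096-byte pages and a 64-byte vm_page, 16384 pages
	 * (64MB) of registered RAM give
	 *
	 *	pagecount = (16385 << 12) / (4096 + 64) = 16132
	 *
	 * i.e. roughly 252 pages worth of RAM end up holding the page
	 * array itself rather than being usable pages.
	 */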
	/*
	 * step 4: init the vm_page structures and put them in the correct
	 * place...
	 */

	for (lcv = 0 ; lcv < vm_nphysseg ; lcv++) {

		n = vm_physmem[lcv].end - vm_physmem[lcv].start;
		if (n > pagecount) {
			printf("uvm_page_init: lost %d page(s) in init\n",
			    n - pagecount);
			panic("uvm_page_init");	/* XXXCDC: shouldn't happen? */
			/* n = pagecount; */
		}

		/* set up page array pointers */
		vm_physmem[lcv].pgs = pagearray;
		pagearray += n;
		pagecount -= n;
		vm_physmem[lcv].lastpg = vm_physmem[lcv].pgs + (n - 1);

		/* init and free vm_pages (we've already zeroed them) */
		paddr = ptoa(vm_physmem[lcv].start);
		for (i = 0 ; i < n ; i++, paddr += PAGE_SIZE) {
			vm_physmem[lcv].pgs[i].phys_addr = paddr;
			if (atop(paddr) >= vm_physmem[lcv].avail_start &&
			    atop(paddr) <= vm_physmem[lcv].avail_end) {
				uvmexp.npages++;
				/* add page to free pool */
				uvm_pagefree(&vm_physmem[lcv].pgs[i]);
			}
		}
	}
	/*
	 * step 5: pass up the values of virtual_space_start and
	 * virtual_space_end (obtained by uvm_pageboot_alloc) to the upper
	 * layers of the VM.
	 */

	*kvm_startp = round_page(virtual_space_start);
	*kvm_endp = trunc_page(virtual_space_end);

	/*
	 * step 6: init pagedaemon lock
	 */

	simple_lock_init(&uvm.pagedaemon_lock);

	/*
	 * step 7: init reserve thresholds
	 * XXXCDC - values may need adjusting
	 */
	uvmexp.reserve_pagedaemon = 1;
	uvmexp.reserve_kernel = 5;

	/*
	 * done!
	 */

}

/*
 * uvm_setpagesize: set the page size
 *
 * => sets page_shift and page_mask from uvmexp.pagesize.
 * => XXXCDC: move global vars.
 */

void
uvm_setpagesize()
{
	if (uvmexp.pagesize == 0)
		uvmexp.pagesize = DEFAULT_PAGE_SIZE;
	uvmexp.pagemask = uvmexp.pagesize - 1;
	if ((uvmexp.pagemask & uvmexp.pagesize) != 0)
		panic("uvm_setpagesize: page size not a power of two");
	for (uvmexp.pageshift = 0; ; uvmexp.pageshift++)
		if ((1 << uvmexp.pageshift) == uvmexp.pagesize)
			break;
}
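/*
 * example of the derivation above: a machine reporting
 * uvmexp.pagesize == 8192 ends up with pagemask == 0x1fff and
 * pageshift == 13.  the (pagemask & pagesize) test catches any
 * non-power-of-two size, since only a power of two shares no bits
 * with its predecessor.
 */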
/*
 * uvm_pageboot_alloc: steal memory from physmem for bootstrapping
 */

vaddr_t
uvm_pageboot_alloc(size)
	vsize_t size;
{
#if defined(PMAP_STEAL_MEMORY)
	vaddr_t addr;

	/*
	 * defer bootstrap allocation to MD code (it may want to allocate
	 * from a direct-mapped segment).  pmap_steal_memory should round
	 * off virtual_space_start/virtual_space_end.
	 */

	addr = pmap_steal_memory(size, &virtual_space_start,
	    &virtual_space_end);

	return(addr);

#else /* !PMAP_STEAL_MEMORY */

	static boolean_t initialized = FALSE;
	vaddr_t addr, vaddr;
	paddr_t paddr;

	/* round to page size */
	size = round_page(size);

	/*
	 * on first call to this function, initialize ourselves.
	 */
	if (initialized == FALSE) {
		pmap_virtual_space(&virtual_space_start, &virtual_space_end);

		/* round it the way we like it */
		virtual_space_start = round_page(virtual_space_start);
		virtual_space_end = trunc_page(virtual_space_end);

		initialized = TRUE;
	}

	/*
	 * allocate virtual memory for this request
	 */
	if (virtual_space_start == virtual_space_end ||
	    (virtual_space_end - virtual_space_start) < size)
		panic("uvm_pageboot_alloc: out of virtual space");

	addr = virtual_space_start;

#ifdef PMAP_GROWKERNEL
	/*
	 * If the kernel pmap can't map the requested space,
	 * then allocate more resources for it.
	 */
	if (uvm_maxkaddr < (addr + size)) {
		uvm_maxkaddr = pmap_growkernel(addr + size);
		if (uvm_maxkaddr < (addr + size))
			panic("uvm_pageboot_alloc: pmap_growkernel() failed");
	}
#endif

	virtual_space_start += size;

	/*
	 * allocate and mapin physical pages to back new virtual pages
	 */

	for (vaddr = round_page(addr) ; vaddr < addr + size ;
	    vaddr += PAGE_SIZE) {

		if (!uvm_page_physget(&paddr))
			panic("uvm_pageboot_alloc: out of memory");

		/* XXX: should be wired, but some pmaps don't like that ... */
#if defined(PMAP_NEW)
		/*
		 * Note this memory is no longer managed, so using
		 * pmap_kenter is safe.
		 */
		pmap_kenter_pa(vaddr, paddr, VM_PROT_READ|VM_PROT_WRITE);
#else
		pmap_enter(pmap_kernel(), vaddr, paddr,
		    VM_PROT_READ|VM_PROT_WRITE, FALSE,
		    VM_PROT_READ|VM_PROT_WRITE);
#endif

	}
	return(addr);
#endif	/* PMAP_STEAL_MEMORY */
}
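/*
 * usage sketch (illustrative sizes only): in the !PMAP_STEAL_MEMORY
 * case above this is a simple bump allocator over the kernel virtual
 * range, so successive boot-time calls carve from the bottom:
 *
 *	va1 = uvm_pageboot_alloc(2 * PAGE_SIZE);  <- old space start
 *	va2 = uvm_pageboot_alloc(PAGE_SIZE);      <- va1 + 2*PAGE_SIZE
 *
 * each virtual page is backed on the spot via uvm_page_physget(), so
 * the memory is usable before the page system itself is up.
 */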
#if !defined(PMAP_STEAL_MEMORY)
/*
 * uvm_page_physget: "steal" one page from the vm_physmem structure.
 *
 * => attempt to allocate it off the end of a segment in which the "avail"
 *    values match the start/end values.  if we can't do that, then we
 *    will advance both values (making them equal, and removing some
 *    vm_page structures from the non-avail area).
 * => return false if out of memory.
 */

boolean_t
uvm_page_physget(paddrp)
	paddr_t *paddrp;
{
	int lcv, x;

	/* pass 1: try allocating from a matching end */
#if (VM_PHYSSEG_STRAT == VM_PSTRAT_BIGFIRST) || \
	(VM_PHYSSEG_STRAT == VM_PSTRAT_BSEARCH)
	for (lcv = vm_nphysseg - 1 ; lcv >= 0 ; lcv--)
#else
	for (lcv = 0 ; lcv < vm_nphysseg ; lcv++)
#endif
	{

		if (vm_physmem[lcv].pgs)
			panic("vm_page_physget: called _after_ bootstrap");

		/* try from front */
		if (vm_physmem[lcv].avail_start == vm_physmem[lcv].start &&
		    vm_physmem[lcv].avail_start < vm_physmem[lcv].avail_end) {
			*paddrp = ptoa(vm_physmem[lcv].avail_start);
			vm_physmem[lcv].avail_start++;
			vm_physmem[lcv].start++;
			/* nothing left?   nuke it */
			if (vm_physmem[lcv].avail_start ==
			    vm_physmem[lcv].end) {
				if (vm_nphysseg == 1)
					panic("vm_page_physget: out of memory!");
				vm_nphysseg--;
				for (x = lcv ; x < vm_nphysseg ; x++)
					/* structure copy */
					vm_physmem[x] = vm_physmem[x+1];
			}
			return (TRUE);
		}

		/* try from rear */
		if (vm_physmem[lcv].avail_end == vm_physmem[lcv].end &&
		    vm_physmem[lcv].avail_start < vm_physmem[lcv].avail_end) {
			*paddrp = ptoa(vm_physmem[lcv].avail_end - 1);
			vm_physmem[lcv].avail_end--;
			vm_physmem[lcv].end--;
			/* nothing left?   nuke it */
			if (vm_physmem[lcv].avail_end ==
			    vm_physmem[lcv].start) {
				if (vm_nphysseg == 1)
					panic("vm_page_physget: out of memory!");
				vm_nphysseg--;
				for (x = lcv ; x < vm_nphysseg ; x++)
					/* structure copy */
					vm_physmem[x] = vm_physmem[x+1];
			}
			return (TRUE);
		}
	}

	/* pass2: forget about matching ends, just allocate something */
#if (VM_PHYSSEG_STRAT == VM_PSTRAT_BIGFIRST) || \
	(VM_PHYSSEG_STRAT == VM_PSTRAT_BSEARCH)
	for (lcv = vm_nphysseg - 1 ; lcv >= 0 ; lcv--)
#else
	for (lcv = 0 ; lcv < vm_nphysseg ; lcv++)
#endif
	{

		/* any room in this bank? */
		if (vm_physmem[lcv].avail_start >= vm_physmem[lcv].avail_end)
			continue;	/* nope */

		*paddrp = ptoa(vm_physmem[lcv].avail_start);
		vm_physmem[lcv].avail_start++;
		/* truncate! */
		vm_physmem[lcv].start = vm_physmem[lcv].avail_start;

		/* nothing left?   nuke it */
		if (vm_physmem[lcv].avail_start == vm_physmem[lcv].end) {
			if (vm_nphysseg == 1)
				panic("vm_page_physget: out of memory!");
			vm_nphysseg--;
			for (x = lcv ; x < vm_nphysseg ; x++)
				/* structure copy */
				vm_physmem[x] = vm_physmem[x+1];
		}
		return (TRUE);
	}

	return (FALSE);		/* whoops! */
}
#endif /* PMAP_STEAL_MEMORY */
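/*
 * behavior sketch with made-up numbers: a segment with start ==
 * avail_start == 0x100 and end == avail_end == 0x200 passes the
 * pass 1 "matching rear" test, so pfn 0x1ff is handed out and both
 * end and avail_end drop to 0x1ff.  only when no segment has a
 * matching end does pass 2 run: it takes a page from avail_start and
 * truncates the segment's start to match, which is what "removes"
 * the vm_page structures that would have covered the skipped
 * non-avail front pages.
 */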
/*
 * uvm_page_physload: load physical memory into VM system
 *
 * => all args are PFNs (page frame numbers)
 * => all pages in start/end get vm_page structures
 * => areas marked by avail_start/avail_end get added to the free page pool
 * => we are limited to VM_PHYSSEG_MAX physical memory segments
 */

void
uvm_page_physload(start, end, avail_start, avail_end, free_list)
	vaddr_t start, end, avail_start, avail_end;
	int free_list;
{
	int preload, lcv;
	psize_t npages;
	struct vm_page *pgs;
	struct vm_physseg *ps;

	if (uvmexp.pagesize == 0)
		panic("vm_page_physload: page size not set!");

	if (free_list >= VM_NFREELIST || free_list < VM_FREELIST_DEFAULT)
		panic("uvm_page_physload: bad free list %d\n", free_list);

	/*
	 * do we have room?
	 */
	if (vm_nphysseg == VM_PHYSSEG_MAX) {
		printf("vm_page_physload: unable to load physical memory "
		    "segment\n");
		printf("\t%d segments allocated, ignoring 0x%lx -> 0x%lx\n",
		    VM_PHYSSEG_MAX, start, end);
		return;
	}

	/*
	 * check to see if this is a "preload" (i.e. uvm_mem_init hasn't been
	 * called yet, so malloc is not available).
	 */
	for (lcv = 0 ; lcv < vm_nphysseg ; lcv++) {
		if (vm_physmem[lcv].pgs)
			break;
	}
	preload = (lcv == vm_nphysseg);

	/*
	 * if VM is already running, attempt to malloc() vm_page structures
	 */
	if (!preload) {
#if defined(VM_PHYSSEG_NOADD)
		panic("vm_page_physload: tried to add RAM after vm_mem_init");
#else
		/* XXXCDC: need some sort of lockout for this case */
		paddr_t paddr;
		npages = end - start;	/* # of pages */
		MALLOC(pgs, struct vm_page *, sizeof(struct vm_page) * npages,
		    M_VMPAGE, M_NOWAIT);
		if (pgs == NULL) {
			printf("vm_page_physload: can not malloc vm_page "
			    "structs for segment\n");
			printf("\tignoring 0x%lx -> 0x%lx\n", start, end);
			return;
		}
		/* zero data, init phys_addr and free_list, and free pages */
		bzero(pgs, sizeof(struct vm_page) * npages);
		for (lcv = 0, paddr = ptoa(start) ;
		    lcv < npages ; lcv++, paddr += PAGE_SIZE) {
			pgs[lcv].phys_addr = paddr;
			pgs[lcv].free_list = free_list;
			if (atop(paddr) >= avail_start &&
			    atop(paddr) <= avail_end)
				uvm_pagefree(&pgs[lcv]);
		}
		/* XXXCDC: incomplete: need to update uvmexp.free, what else? */
		/* XXXCDC: need hook to tell pmap to rebuild pv_list, etc... */
#endif
	} else {

		/* gcc complains if these don't get init'd */
		pgs = NULL;
		npages = 0;

	}

	/*
	 * now insert us in the proper place in vm_physmem[]
	 */

#if (VM_PHYSSEG_STRAT == VM_PSTRAT_RANDOM)

	/* random: put it at the end (easy!) */
	ps = &vm_physmem[vm_nphysseg];

#elif (VM_PHYSSEG_STRAT == VM_PSTRAT_BSEARCH)

	{
		int x;
		/* sort by address for binary search */
		for (lcv = 0 ; lcv < vm_nphysseg ; lcv++)
			if (start < vm_physmem[lcv].start)
				break;
		ps = &vm_physmem[lcv];
		/* move back other entries, if necessary ... */
		for (x = vm_nphysseg ; x > lcv ; x--)
			/* structure copy */
			vm_physmem[x] = vm_physmem[x - 1];
	}

#elif (VM_PHYSSEG_STRAT == VM_PSTRAT_BIGFIRST)

	{
		int x;
		/* sort by largest segment first */
		for (lcv = 0 ; lcv < vm_nphysseg ; lcv++)
			if ((end - start) >
			    (vm_physmem[lcv].end - vm_physmem[lcv].start))
				break;
		ps = &vm_physmem[lcv];
		/* move back other entries, if necessary ... */
		for (x = vm_nphysseg ; x > lcv ; x--)
			/* structure copy */
			vm_physmem[x] = vm_physmem[x - 1];
	}

#else

	panic("vm_page_physload: unknown physseg strategy selected!");

#endif

	ps->start = start;
	ps->end = end;
	ps->avail_start = avail_start;
	ps->avail_end = avail_end;
	if (preload) {
		ps->pgs = NULL;
	} else {
		ps->pgs = pgs;
		ps->lastpg = pgs + npages - 1;
	}
	ps->free_list = free_list;
	vm_nphysseg++;

	/*
	 * done!
	 */

	if (!preload)
		uvm_page_rehash();

	return;
}
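/*
 * illustrative MD bootstrap call (made-up addresses; the real callers
 * live in each port's pmap_bootstrap()-era code): a machine with 8MB
 * of RAM whose first 16 pages are occupied by the kernel might do
 *
 *	uvm_page_physload(atop(0x0), atop(0x800000),
 *	    atop(0x10000), atop(0x800000), VM_FREELIST_DEFAULT);
 *
 * giving all of 0x0 -> 0x800000 vm_page structures, but adding only
 * the pages from 0x10000 up to the free page pool.
 */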
/*
 * uvm_page_rehash: reallocate hash table based on number of free pages.
 */

void
uvm_page_rehash()
{
	int freepages, lcv, bucketcount, s, oldcount;
	struct pglist *newbuckets, *oldbuckets;
	struct vm_page *pg;

	/*
	 * compute number of pages that can go in the free pool
	 */

	freepages = 0;
	for (lcv = 0 ; lcv < vm_nphysseg ; lcv++)
		freepages +=
		    (vm_physmem[lcv].avail_end - vm_physmem[lcv].avail_start);

	/*
	 * compute number of buckets needed for this number of pages
	 */

	bucketcount = 1;
	while (bucketcount < freepages)
		bucketcount = bucketcount * 2;

	/*
	 * malloc new buckets
	 */

	MALLOC(newbuckets, struct pglist *, sizeof(struct pglist) * bucketcount,
	    M_VMPBUCKET, M_NOWAIT);
	if (newbuckets == NULL) {
		printf("vm_page_physrehash: WARNING: could not grow page "
		    "hash table\n");
		return;
	}
	for (lcv = 0 ; lcv < bucketcount ; lcv++)
		TAILQ_INIT(&newbuckets[lcv]);

	/*
	 * now replace the old buckets with the new ones and rehash everything
	 */

	s = splimp();
	simple_lock(&uvm.hashlock);
	/* swap old for new ... */
	oldbuckets = uvm.page_hash;
	oldcount = uvm.page_nhash;
	uvm.page_hash = newbuckets;
	uvm.page_nhash = bucketcount;
	uvm.page_hashmask = bucketcount - 1;	/* power of 2 */

	/* ... and rehash */
	for (lcv = 0 ; lcv < oldcount ; lcv++) {
		while ((pg = oldbuckets[lcv].tqh_first) != NULL) {
			TAILQ_REMOVE(&oldbuckets[lcv], pg, hashq);
			TAILQ_INSERT_TAIL(
			    &uvm.page_hash[uvm_pagehash(pg->uobject, pg->offset)],
			    pg, hashq);
		}
	}
	simple_unlock(&uvm.hashlock);
	splx(s);

	/*
	 * free old bucket array if we malloc'd it previously
	 */

	if (oldbuckets != &uvm_bootbucket)
		FREE(oldbuckets, M_VMPBUCKET);

	/*
	 * done
	 */
	return;
}
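/*
 * bucket-count example (illustrative numbers): with freepages == 8000,
 * the doubling loop above stops at bucketcount == 8192, so
 * page_hashmask becomes 0x1fff -- roughly one bucket per free page,
 * rounded up to a power of two so the mask can stand in for a modulus.
 */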
#if 1 /* XXXCDC: TMP TMP TMP DEBUG DEBUG DEBUG */

void uvm_page_physdump __P((void)); /* SHUT UP GCC */

/* call from DDB */
void
uvm_page_physdump()
{
	int lcv;

	printf("physdump: physical memory config [segs=%d of %d]:\n",
	    vm_nphysseg, VM_PHYSSEG_MAX);
	for (lcv = 0 ; lcv < vm_nphysseg ; lcv++)
		printf("0x%lx->0x%lx [0x%lx->0x%lx]\n", vm_physmem[lcv].start,
		    vm_physmem[lcv].end, vm_physmem[lcv].avail_start,
		    vm_physmem[lcv].avail_end);
	printf("STRATEGY = ");
	switch (VM_PHYSSEG_STRAT) {
	case VM_PSTRAT_RANDOM: printf("RANDOM\n"); break;
	case VM_PSTRAT_BSEARCH: printf("BSEARCH\n"); break;
	case VM_PSTRAT_BIGFIRST: printf("BIGFIRST\n"); break;
	default: printf("<<UNKNOWN>>!!!!\n");
	}
	printf("number of buckets = %d\n", uvm.page_nhash);
}
#endif

/*
 * uvm_pagealloc_strat: allocate vm_page from a particular free list.
 *
 * => return null if no pages free
 * => wake up pagedaemon if number of free pages drops below low water mark
 * => if obj != NULL, obj must be locked (to put in hash)
 * => if anon != NULL, anon must be locked (to put in anon)
 * => only one of obj or anon can be non-null
 * => caller must activate/deactivate page if it is not wired.
 * => free_list is ignored if strat == UVM_PGA_STRAT_NORMAL.
 */

struct vm_page *
uvm_pagealloc_strat(obj, off, anon, flags, strat, free_list)
	struct uvm_object *obj;
	vaddr_t off;
	int flags;
	struct vm_anon *anon;
	int strat, free_list;
{
	int lcv, s;
	struct vm_page *pg;
	struct pglist *freeq;
	boolean_t use_reserve;

#ifdef DIAGNOSTIC
	/* sanity check */
	if (obj && anon)
		panic("uvm_pagealloc: obj and anon != NULL");
#endif

	s = uvm_lock_fpageq();		/* lock free page queue */

	/*
	 * check to see if we need to generate some free pages by waking
	 * the pagedaemon.
	 */

	if (uvmexp.free < uvmexp.freemin || (uvmexp.free < uvmexp.freetarg &&
	    uvmexp.inactive < uvmexp.inactarg))
		thread_wakeup(&uvm.pagedaemon);

	/*
	 * fail if any of these conditions is true:
	 * [1]  there really are no free pages, or
	 * [2]  only kernel "reserved" pages remain and
	 *      the page isn't being allocated to a kernel object, or
	 * [3]  only pagedaemon "reserved" pages remain and
	 *      the requestor isn't the pagedaemon.
	 */

	use_reserve = (flags & UVM_PGA_USERESERVE) ||
	    (obj && UVM_OBJ_IS_KERN_OBJECT(obj));
	if ((uvmexp.free <= uvmexp.reserve_kernel && !use_reserve) ||
	    (uvmexp.free <= uvmexp.reserve_pagedaemon &&
	     !(use_reserve && curproc == uvm.pagedaemon_proc)))
		goto fail;

 again:
	switch (strat) {
	case UVM_PGA_STRAT_NORMAL:
		/* Check all freelists in descending priority order. */
		for (lcv = 0; lcv < VM_NFREELIST; lcv++) {
			freeq = &uvm.page_free[lcv];
			if ((pg = freeq->tqh_first) != NULL)
				goto gotit;
		}

		/* No pages free! */
		goto fail;

	case UVM_PGA_STRAT_ONLY:
	case UVM_PGA_STRAT_FALLBACK:
		/* Attempt to allocate from the specified free list. */
#ifdef DIAGNOSTIC
		if (free_list >= VM_NFREELIST || free_list < 0)
			panic("uvm_pagealloc_strat: bad free list %d",
			    free_list);
#endif
		freeq = &uvm.page_free[free_list];
		if ((pg = freeq->tqh_first) != NULL)
			goto gotit;

		/* Fall back, if possible. */
		if (strat == UVM_PGA_STRAT_FALLBACK) {
			strat = UVM_PGA_STRAT_NORMAL;
			goto again;
		}

		/* No pages free! */
		goto fail;

	default:
		panic("uvm_pagealloc_strat: bad strat %d", strat);
		/* NOTREACHED */
	}

 gotit:
	TAILQ_REMOVE(freeq, pg, pageq);
	uvmexp.free--;

	uvm_unlock_fpageq(s);		/* unlock free page queue */

	pg->offset = off;
	pg->uobject = obj;
	pg->uanon = anon;
	pg->flags = PG_BUSY|PG_CLEAN|PG_FAKE;
	pg->version++;
	pg->wire_count = 0;
	pg->loan_count = 0;
	if (anon) {
		anon->u.an_page = pg;
		pg->pqflags = PQ_ANON;
	} else {
		if (obj)
			uvm_pageinsert(pg);
		pg->pqflags = 0;
	}
#if defined(UVM_PAGE_TRKOWN)
	pg->owner_tag = NULL;
#endif
	UVM_PAGE_OWN(pg, "new alloc");

	return(pg);

 fail:
	uvm_unlock_fpageq(s);
	return (NULL);
}
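/*
 * typical use (sketch; assumes the uvm_pagealloc() convenience macro
 * from uvm_page.h, which passes UVM_PGA_STRAT_NORMAL):
 *
 *	pg = uvm_pagealloc(uobj, off, NULL, 0);
 *	if (pg == NULL)
 *		... sleep waiting for the pagedaemon, then retry ...
 *
 * the page comes back PG_BUSY|PG_CLEAN|PG_FAKE, so the caller must
 * clear PG_BUSY (and PG_FAKE once the page holds valid data) when it
 * is done initializing it.
 */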
/*
 * uvm_pagealloc_contig: allocate contiguous memory.
 *
 * XXX - fix comment.
 */

vaddr_t
uvm_pagealloc_contig(size, low, high, alignment)
	vaddr_t size;
	vaddr_t low, high;
	vaddr_t alignment;
{
	struct pglist pglist;
	struct vm_page *pg;
	vaddr_t addr, temp_addr;

	size = round_page(size);

	TAILQ_INIT(&pglist);
	if (uvm_pglistalloc(size, low, high, alignment, 0,
	    &pglist, 1, FALSE))
		return 0;
	addr = vm_map_min(kernel_map);
	if (uvm_map(kernel_map, &addr, size, NULL, UVM_UNKNOWN_OFFSET,
	    UVM_MAPFLAG(UVM_PROT_ALL, UVM_PROT_ALL, UVM_INH_NONE,
	    UVM_ADV_RANDOM, 0)) != KERN_SUCCESS) {
		uvm_pglistfree(&pglist);
		return 0;
	}
	temp_addr = addr;
	for (pg = TAILQ_FIRST(&pglist); pg != NULL;
	    pg = TAILQ_NEXT(pg, pageq)) {
		pg->uobject = uvm.kernel_object;
		pg->offset = temp_addr - vm_map_min(kernel_map);
		uvm_pageinsert(pg);
		uvm_pagewire(pg);
#if defined(PMAP_NEW)
		pmap_kenter_pa(temp_addr, VM_PAGE_TO_PHYS(pg),
		    VM_PROT_READ|VM_PROT_WRITE);
#else
		pmap_enter(pmap_kernel(), temp_addr, VM_PAGE_TO_PHYS(pg),
		    VM_PROT_READ|VM_PROT_WRITE, TRUE,
		    VM_PROT_READ|VM_PROT_WRITE);
#endif
		temp_addr += PAGE_SIZE;
	}
	return addr;
}

/*
 * uvm_pagerealloc: reallocate a page from one object to another
 *
 * => both objects must be locked
 */

void
uvm_pagerealloc(pg, newobj, newoff)
	struct vm_page *pg;
	struct uvm_object *newobj;
	vaddr_t newoff;
{
	/*
	 * remove it from the old object
	 */

	if (pg->uobject) {
		uvm_pageremove(pg);
	}

	/*
	 * put it in the new object
	 */

	if (newobj) {
		pg->uobject = newobj;
		pg->offset = newoff;
		pg->version++;
		uvm_pageinsert(pg);
	}

	return;
}


/*
 * uvm_pagefree: free page
 *
 * => erase page's identity (i.e. remove from hash/object)
 * => put page on free list
 * => caller must lock owning object (either anon or uvm_object)
 * => caller must lock page queues
 * => assumes all valid mappings of pg are gone
 */

void
uvm_pagefree(pg)
	struct vm_page *pg;
{
	int s;
	int saved_loan_count = pg->loan_count;

	/*
	 * if the page was an object page (and thus "TABLED"), remove it
	 * from the object.
	 */

	if (pg->flags & PG_TABLED) {

		/*
		 * if the object page is on loan we are going to drop ownership.
		 * it is possible that an anon will take over as owner for this
		 * page later on.  the anon will want a !PG_CLEAN page so that
		 * it knows it needs to allocate swap if it wants to page the
		 * page out.
		 */

		if (saved_loan_count)
			pg->flags &= ~PG_CLEAN; /* in case an anon takes over */

		uvm_pageremove(pg);

		/*
		 * if our page was on loan, then we just lost control over it
		 * (in fact, if it was loaned to an anon, the anon may have
		 * already taken over ownership of the page by now and thus
		 * changed the loan_count [e.g. in uvmfault_anonget()]) we just
		 * return (when the last loan is dropped, then the page can be
		 * freed by whatever was holding the last loan).
		 */
		if (saved_loan_count)
			return;

	} else if (saved_loan_count && (pg->pqflags & PQ_ANON)) {

		/*
		 * if our page is owned by an anon and is loaned out to the
		 * kernel then we just want to drop ownership and return.
		 * the kernel must free the page when all its loans clear ...
		 * note that the kernel can't change the loan status of our
		 * page as long as we are holding PQ lock.
		 */
		pg->pqflags &= ~PQ_ANON;
		pg->uanon = NULL;
		return;
	}

#ifdef DIAGNOSTIC
	if (saved_loan_count) {
		printf("uvm_pagefree: warning: freeing page with a loan "
		    "count of %d\n", saved_loan_count);
		panic("uvm_pagefree: loan count");
	}
#endif


	/*
	 * now remove the page from the queues
	 */

	if (pg->pqflags & PQ_ACTIVE) {
		TAILQ_REMOVE(&uvm.page_active, pg, pageq);
		pg->pqflags &= ~PQ_ACTIVE;
		uvmexp.active--;
	}
	if (pg->pqflags & PQ_INACTIVE) {
		if (pg->pqflags & PQ_SWAPBACKED)
			TAILQ_REMOVE(&uvm.page_inactive_swp, pg, pageq);
		else
			TAILQ_REMOVE(&uvm.page_inactive_obj, pg, pageq);
		pg->pqflags &= ~PQ_INACTIVE;
		uvmexp.inactive--;
	}

	/*
	 * if the page was wired, unwire it now.
	 */
	if (pg->wire_count) {
		pg->wire_count = 0;
		uvmexp.wired--;
	}

	/*
	 * and put on free queue
	 */

	s = uvm_lock_fpageq();
	TAILQ_INSERT_TAIL(&uvm.page_free[uvm_page_lookup_freelist(pg)],
	    pg, pageq);
	pg->pqflags = PQ_FREE;
#ifdef DEBUG
	pg->uobject = (void *)0xdeadbeef;
	pg->offset = 0xdeadbeef;
	pg->uanon = (void *)0xdeadbeef;
#endif
	uvmexp.free++;
	uvm_unlock_fpageq(s);
}
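/*
 * the loan cases above, summarized with a concrete scenario (sketch):
 * an object page loaned once to an anon and then freed by the object
 * is only stripped of its object identity (and marked !PG_CLEAN so a
 * future anon owner knows to allocate swap); an anon page loaned to
 * the kernel and freed by the anon just sheds PQ_ANON.  in both cases
 * the physical page stays allocated until the last borrower drops its
 * loan.
 */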
#if defined(UVM_PAGE_TRKOWN)
/*
 * uvm_page_own: set or release page ownership
 *
 * => this is a debugging function that keeps track of who sets PG_BUSY
 *    and where they do it.  it can be used to track down problems
 *    such as a process setting "PG_BUSY" and never releasing it.
 * => page's object [if any] must be locked
 * => if "tag" is NULL then we are releasing page ownership
 */
void
uvm_page_own(pg, tag)
	struct vm_page *pg;
	char *tag;
{
	/* gain ownership? */
	if (tag) {
		if (pg->owner_tag) {
			printf("uvm_page_own: page %p already owned "
			    "by proc %d [%s]\n", pg,
			    pg->owner, pg->owner_tag);
			panic("uvm_page_own");
		}
		pg->owner = (curproc) ? curproc->p_pid : (pid_t) -1;
		pg->owner_tag = tag;
		return;
	}

	/* drop ownership */
	if (pg->owner_tag == NULL) {
		printf("uvm_page_own: dropping ownership of a non-owned "
		    "page (%p)\n", pg);
		panic("uvm_page_own");
	}
	pg->owner_tag = NULL;
	return;
}
#endif