/*	$OpenBSD: uvm_page.c,v 1.19 2001/07/25 14:47:59 art Exp $	*/
/*	$NetBSD: uvm_page.c,v 1.29 1999/12/30 16:09:47 eeh Exp $	*/

/*
 * Copyright (c) 1997 Charles D. Cranor and Washington University.
 * Copyright (c) 1991, 1993, The Regents of the University of California.
 *
 * All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * The Mach Operating System project at Carnegie-Mellon University.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by Charles D. Cranor,
 *	Washington University, the University of California, Berkeley and
 *	its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)vm_page.c	8.3 (Berkeley) 3/21/94
 * from: Id: uvm_page.c,v 1.1.2.18 1998/02/06 05:24:42 chs Exp
 *
 *
 * Copyright (c) 1987, 1990 Carnegie-Mellon University.
 * All rights reserved.
 *
 * Permission to use, copy, modify and distribute this software and
 * its documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND
 * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie the
 * rights to redistribute these changes.
 */

/*
 * uvm_page.c: page ops.
 */
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/malloc.h>
#include <sys/proc.h>

#include <vm/vm.h>
#include <vm/vm_page.h>
#include <vm/vm_kern.h>

#define UVM_PAGE		/* pull in uvm_page.h functions */
#include <uvm/uvm.h>

/*
 * global vars... XXXCDC: move to uvm. structure.
 */

/*
 * physical memory config is stored in vm_physmem.
 */

struct vm_physseg vm_physmem[VM_PHYSSEG_MAX];	/* XXXCDC: uvm.physmem */
int vm_nphysseg = 0;				/* XXXCDC: uvm.nphysseg */

/*
 * local variables
 */

/*
 * these variables record the values returned by vm_page_bootstrap,
 * for debugging purposes.  The implementation of uvm_pageboot_alloc
 * and pmap_startup here also uses them internally.
 */

static vaddr_t virtual_space_start;
static vaddr_t virtual_space_end;

/*
 * we use a hash table with only one bucket during bootup.  we will
 * later rehash (resize) the hash table once the allocator is ready.
 * we static allocate the one bootstrap bucket below...
 */

static struct pglist uvm_bootbucket;

/*
 * local prototypes
 */

static void uvm_pageinsert __P((struct vm_page *));


/*
 * inline functions
 */

/*
 * uvm_pageinsert: insert a page in the object and the hash table
 *
 * => caller must lock object
 * => caller must lock page queues
 * => caller should have already set pg's object and offset pointers
 *    and bumped the version counter
 */

__inline static void
uvm_pageinsert(pg)
	struct vm_page *pg;
{
	struct pglist *buck;
	int s;

#ifdef DIAGNOSTIC
	if (pg->flags & PG_TABLED)
		panic("uvm_pageinsert: already inserted");
#endif

	buck = &uvm.page_hash[uvm_pagehash(pg->uobject,pg->offset)];
	s = splimp();
	simple_lock(&uvm.hashlock);
	TAILQ_INSERT_TAIL(buck, pg, hashq);	/* put in hash */
	simple_unlock(&uvm.hashlock);
	splx(s);

	TAILQ_INSERT_TAIL(&pg->uobject->memq, pg, listq); /* put in object */
	pg->flags |= PG_TABLED;
	pg->uobject->uo_npages++;

}

/*
 * uvm_pageremove: remove page from object and hash
 *
 * => caller must lock object
 * => caller must lock page queues
 */

void __inline
uvm_pageremove(pg)
	struct vm_page *pg;
{
	struct pglist *buck;
	int s;

#ifdef DIAGNOSTIC
	if ((pg->flags & (PG_FAULTING)) != 0)
		panic("uvm_pageremove: page is faulting");
#endif

	if ((pg->flags & PG_TABLED) == 0)
		return;				/* XXX: log */

	buck = &uvm.page_hash[uvm_pagehash(pg->uobject,pg->offset)];
	s = splimp();
	simple_lock(&uvm.hashlock);
	TAILQ_REMOVE(buck, pg, hashq);
	simple_unlock(&uvm.hashlock);
	splx(s);

	/* object should be locked */
	TAILQ_REMOVE(&pg->uobject->memq, pg, listq);

	pg->flags &= ~PG_TABLED;
	pg->uobject->uo_npages--;
	pg->uobject = NULL;
	pg->version++;

}
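/*
 * editor's note: illustrative sketch only, not part of the original
 * file.  a lookup follows the same bucket discipline as the insert
 * and remove routines above: hash <obj,offset> to a bucket, then walk
 * the bucket's tailq at splimp() with the hash lock held.  the real
 * lookup routine (uvm_pagelookup()) lives in the uvm_page inline
 * headers.
 */
#if 0	/* illustrative only */
static struct vm_page *
example_pagelookup(obj, off)
	struct uvm_object *obj;
	vaddr_t off;
{
	struct vm_page *pg;
	struct pglist *buck;
	int s;

	buck = &uvm.page_hash[uvm_pagehash(obj, off)];
	s = splimp();
	simple_lock(&uvm.hashlock);
	for (pg = buck->tqh_first ; pg != NULL ; pg = pg->hashq.tqe_next) {
		if (pg->uobject == obj && pg->offset == off)
			break;
	}
	simple_unlock(&uvm.hashlock);
	splx(s);
	return(pg);
}
#endif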
/*
 * uvm_page_init: init the page system.   called from uvm_init().
 *
 * => we return the range of kernel virtual memory in kvm_startp/kvm_endp
 */

void
uvm_page_init(kvm_startp, kvm_endp)
	vaddr_t *kvm_startp, *kvm_endp;
{
	vsize_t freepages, pagecount, n;
	vm_page_t pagearray;
	int lcv, i;
	paddr_t paddr;


	/*
	 * step 1: init the page queues and page queue locks
	 */
	for (lcv = 0; lcv < VM_NFREELIST; lcv++)
		TAILQ_INIT(&uvm.page_free[lcv]);
	TAILQ_INIT(&uvm.page_active);
	TAILQ_INIT(&uvm.page_inactive_swp);
	TAILQ_INIT(&uvm.page_inactive_obj);
	simple_lock_init(&uvm.pageqlock);
	simple_lock_init(&uvm.fpageqlock);

	/*
	 * step 2: init the <obj,offset> => <page> hash table.  for now
	 * we just have one bucket (the bootstrap bucket).  later on we
	 * will allocate new buckets as we dynamically resize the hash table.
	 */

	uvm.page_nhash = 1;			/* 1 bucket */
	uvm.page_hashmask = 0;			/* mask for hash function */
	uvm.page_hash = &uvm_bootbucket;	/* install bootstrap bucket */
	TAILQ_INIT(uvm.page_hash);		/* init hash table */
	simple_lock_init(&uvm.hashlock);	/* init hash table lock */

	/*
	 * step 3: allocate vm_page structures.
	 */

	/*
	 * sanity check:
	 * before calling this function the MD code is expected to register
	 * some free RAM with the uvm_page_physload() function.  our job
	 * now is to allocate vm_page structures for this memory.
	 */

	if (vm_nphysseg == 0)
		panic("vm_page_bootstrap: no memory pre-allocated");

	/*
	 * first calculate the number of free pages...
	 *
	 * note that we use start/end rather than avail_start/avail_end.
	 * this allows us to allocate extra vm_page structures in case we
	 * want to return some memory to the pool after booting.
	 */

	freepages = 0;
	for (lcv = 0 ; lcv < vm_nphysseg ; lcv++)
		freepages += (vm_physmem[lcv].end - vm_physmem[lcv].start);

	/*
	 * we now know we have (PAGE_SIZE * freepages) bytes of memory we can
	 * use.  for each page of memory we use we need a vm_page structure.
	 * thus, the total number of pages we can use is the total size of
	 * the memory divided by the PAGE_SIZE plus the size of the vm_page
	 * structure.  we add one to freepages as a fudge factor to avoid
	 * truncation errors (since we can only allocate in terms of whole
	 * pages).
	 */

	pagecount = ((freepages + 1) << PAGE_SHIFT) /
	    (PAGE_SIZE + sizeof(struct vm_page));
	pagearray = (vm_page_t)uvm_pageboot_alloc(pagecount *
	    sizeof(struct vm_page));
	memset(pagearray, 0, pagecount * sizeof(struct vm_page));
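	/*
	 * editor's note: worked example of the pagecount calculation
	 * above (illustrative numbers only; the vm_page size is an
	 * assumption): with PAGE_SIZE = 4096, a 64-byte vm_page
	 * structure, and 16384 free pages (64MB) of RAM:
	 *
	 *	pagecount = ((16384 + 1) << 12) / (4096 + 64)
	 *		  = 67112960 / 4160 = 16133
	 *
	 * i.e. roughly 250 of the pages are set aside to hold the
	 * vm_page structures themselves.
	 */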
	/*
	 * step 4: init the vm_page structures and put them in the correct
	 * place...
	 */

	for (lcv = 0 ; lcv < vm_nphysseg ; lcv++) {

		n = vm_physmem[lcv].end - vm_physmem[lcv].start;
		if (n > pagecount) {
			printf("uvm_page_init: lost %ld page(s) in init\n",
			    (long)(n - pagecount));
			panic("uvm_page_init");	/* XXXCDC: shouldn't happen? */
			/* n = pagecount; */
		}
		/* set up page array pointers */
		vm_physmem[lcv].pgs = pagearray;
		pagearray += n;
		pagecount -= n;
		vm_physmem[lcv].lastpg = vm_physmem[lcv].pgs + (n - 1);

		/* init and free vm_pages (we've already zeroed them) */
		paddr = ptoa(vm_physmem[lcv].start);
		for (i = 0 ; i < n ; i++, paddr += PAGE_SIZE) {
			vm_physmem[lcv].pgs[i].phys_addr = paddr;
			if (atop(paddr) >= vm_physmem[lcv].avail_start &&
			    atop(paddr) <= vm_physmem[lcv].avail_end) {
				uvmexp.npages++;
				/* add page to free pool */
				uvm_pagefree(&vm_physmem[lcv].pgs[i]);
			}
		}
	}
	/*
	 * step 5: pass up the values of virtual_space_start and
	 * virtual_space_end (obtained by uvm_pageboot_alloc) to the upper
	 * layers of the VM.
	 */

	*kvm_startp = round_page(virtual_space_start);
	*kvm_endp = trunc_page(virtual_space_end);

	/*
	 * step 6: init pagedaemon lock
	 */

	simple_lock_init(&uvm.pagedaemon_lock);

	/*
	 * step 7: init reserve thresholds
	 * XXXCDC - values may need adjusting
	 */
	uvmexp.reserve_pagedaemon = 4;
	uvmexp.reserve_kernel = 6;

	/*
	 * done!
	 */

	uvm.page_init_done = TRUE;
}

/*
 * uvm_setpagesize: set the page size
 *
 * => sets page_shift and page_mask from uvmexp.pagesize.
 * => XXXCDC: move global vars.
 */

void
uvm_setpagesize()
{
	if (uvmexp.pagesize == 0)
		uvmexp.pagesize = DEFAULT_PAGE_SIZE;
	uvmexp.pagemask = uvmexp.pagesize - 1;
	if ((uvmexp.pagemask & uvmexp.pagesize) != 0)
		panic("uvm_setpagesize: page size not a power of two");
	for (uvmexp.pageshift = 0; ; uvmexp.pageshift++)
		if ((1 << uvmexp.pageshift) == uvmexp.pagesize)
			break;
}
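/*
 * editor's note: example of uvm_setpagesize()'s outputs (illustrative):
 * with uvmexp.pagesize = 4096 it computes uvmexp.pagemask = 0xfff and
 * uvmexp.pageshift = 12, since 4096 == (1 << 12).  a non-power-of-two
 * size such as 0x3000 fails the check above because its mask overlaps
 * the size: (0x2fff & 0x3000) != 0.
 */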
421 */ 422 if (uvm_maxkaddr < (addr + size)) { 423 uvm_maxkaddr = pmap_growkernel(addr + size); 424 if (uvm_maxkaddr < (addr + size)) 425 panic("uvm_pageboot_alloc: pmap_growkernel() failed"); 426 } 427#endif 428 429 virtual_space_start += size; 430 431 /* 432 * allocate and mapin physical pages to back new virtual pages 433 */ 434 435 for (vaddr = round_page(addr) ; vaddr < addr + size ; 436 vaddr += PAGE_SIZE) { 437 438 if (!uvm_page_physget(&paddr)) 439 panic("uvm_pageboot_alloc: out of memory"); 440 441 /* 442 * Note this memory is no longer managed, so using 443 * pmap_kenter is safe. 444 */ 445 pmap_kenter_pa(vaddr, paddr, VM_PROT_READ|VM_PROT_WRITE); 446 } 447 return(addr); 448#endif /* PMAP_STEAL_MEMORY */ 449} 450 451#if !defined(PMAP_STEAL_MEMORY) 452/* 453 * uvm_page_physget: "steal" one page from the vm_physmem structure. 454 * 455 * => attempt to allocate it off the end of a segment in which the "avail" 456 * values match the start/end values. if we can't do that, then we 457 * will advance both values (making them equal, and removing some 458 * vm_page structures from the non-avail area). 459 * => return false if out of memory. 460 */ 461 462/* subroutine: try to allocate from memory chunks on the specified freelist */ 463static boolean_t uvm_page_physget_freelist __P((paddr_t *, int)); 464 465static boolean_t 466uvm_page_physget_freelist(paddrp, freelist) 467 paddr_t *paddrp; 468 int freelist; 469{ 470 int lcv, x; 471 472 /* pass 1: try allocating from a matching end */ 473#if (VM_PHYSSEG_STRAT == VM_PSTRAT_BIGFIRST) || \ 474 (VM_PHYSSEG_STRAT == VM_PSTRAT_BSEARCH) 475 for (lcv = vm_nphysseg - 1 ; lcv >= 0 ; lcv--) 476#else 477 for (lcv = 0 ; lcv < vm_nphysseg ; lcv++) 478#endif 479 { 480 481 if (uvm.page_init_done == TRUE) 482 panic("vm_page_physget: called _after_ bootstrap"); 483 484 if (vm_physmem[lcv].free_list != freelist) 485 continue; 486 487 /* try from front */ 488 if (vm_physmem[lcv].avail_start == vm_physmem[lcv].start && 489 vm_physmem[lcv].avail_start < vm_physmem[lcv].avail_end) { 490 *paddrp = ptoa(vm_physmem[lcv].avail_start); 491 vm_physmem[lcv].avail_start++; 492 vm_physmem[lcv].start++; 493 /* nothing left? nuke it */ 494 if (vm_physmem[lcv].avail_start == 495 vm_physmem[lcv].end) { 496 if (vm_nphysseg == 1) 497 panic("vm_page_physget: out of memory!"); 498 vm_nphysseg--; 499 for (x = lcv ; x < vm_nphysseg ; x++) 500 /* structure copy */ 501 vm_physmem[x] = vm_physmem[x+1]; 502 } 503 return (TRUE); 504 } 505 506 /* try from rear */ 507 if (vm_physmem[lcv].avail_end == vm_physmem[lcv].end && 508 vm_physmem[lcv].avail_start < vm_physmem[lcv].avail_end) { 509 *paddrp = ptoa(vm_physmem[lcv].avail_end - 1); 510 vm_physmem[lcv].avail_end--; 511 vm_physmem[lcv].end--; 512 /* nothing left? nuke it */ 513 if (vm_physmem[lcv].avail_end == 514 vm_physmem[lcv].start) { 515 if (vm_nphysseg == 1) 516 panic("vm_page_physget: out of memory!"); 517 vm_nphysseg--; 518 for (x = lcv ; x < vm_nphysseg ; x++) 519 /* structure copy */ 520 vm_physmem[x] = vm_physmem[x+1]; 521 } 522 return (TRUE); 523 } 524 } 525 526 /* pass2: forget about matching ends, just allocate something */ 527#if (VM_PHYSSEG_STRAT == VM_PSTRAT_BIGFIRST) || \ 528 (VM_PHYSSEG_STRAT == VM_PSTRAT_BSEARCH) 529 for (lcv = vm_nphysseg - 1 ; lcv >= 0 ; lcv--) 530#else 531 for (lcv = 0 ; lcv < vm_nphysseg ; lcv++) 532#endif 533 { 534 535 /* any room in this bank? 
#if !defined(PMAP_STEAL_MEMORY)
/*
 * uvm_page_physget: "steal" one page from the vm_physmem structure.
 *
 * => attempt to allocate it off the end of a segment in which the "avail"
 *    values match the start/end values.  if we can't do that, then we
 *    will advance both values (making them equal, and removing some
 *    vm_page structures from the non-avail area).
 * => return false if out of memory.
 */

/* subroutine: try to allocate from memory chunks on the specified freelist */
static boolean_t uvm_page_physget_freelist __P((paddr_t *, int));

static boolean_t
uvm_page_physget_freelist(paddrp, freelist)
	paddr_t *paddrp;
	int freelist;
{
	int lcv, x;

	/* pass 1: try allocating from a matching end */
#if (VM_PHYSSEG_STRAT == VM_PSTRAT_BIGFIRST) || \
	(VM_PHYSSEG_STRAT == VM_PSTRAT_BSEARCH)
	for (lcv = vm_nphysseg - 1 ; lcv >= 0 ; lcv--)
#else
	for (lcv = 0 ; lcv < vm_nphysseg ; lcv++)
#endif
	{

		if (uvm.page_init_done == TRUE)
			panic("vm_page_physget: called _after_ bootstrap");

		if (vm_physmem[lcv].free_list != freelist)
			continue;

		/* try from front */
		if (vm_physmem[lcv].avail_start == vm_physmem[lcv].start &&
		    vm_physmem[lcv].avail_start < vm_physmem[lcv].avail_end) {
			*paddrp = ptoa(vm_physmem[lcv].avail_start);
			vm_physmem[lcv].avail_start++;
			vm_physmem[lcv].start++;
			/* nothing left?   nuke it */
			if (vm_physmem[lcv].avail_start ==
			    vm_physmem[lcv].end) {
				if (vm_nphysseg == 1)
					panic("vm_page_physget: out of memory!");
				vm_nphysseg--;
				for (x = lcv ; x < vm_nphysseg ; x++)
					/* structure copy */
					vm_physmem[x] = vm_physmem[x+1];
			}
			return (TRUE);
		}

		/* try from rear */
		if (vm_physmem[lcv].avail_end == vm_physmem[lcv].end &&
		    vm_physmem[lcv].avail_start < vm_physmem[lcv].avail_end) {
			*paddrp = ptoa(vm_physmem[lcv].avail_end - 1);
			vm_physmem[lcv].avail_end--;
			vm_physmem[lcv].end--;
			/* nothing left?   nuke it */
			if (vm_physmem[lcv].avail_end ==
			    vm_physmem[lcv].start) {
				if (vm_nphysseg == 1)
					panic("vm_page_physget: out of memory!");
				vm_nphysseg--;
				for (x = lcv ; x < vm_nphysseg ; x++)
					/* structure copy */
					vm_physmem[x] = vm_physmem[x+1];
			}
			return (TRUE);
		}
	}

	/* pass 2: forget about matching ends, just allocate something */
#if (VM_PHYSSEG_STRAT == VM_PSTRAT_BIGFIRST) || \
	(VM_PHYSSEG_STRAT == VM_PSTRAT_BSEARCH)
	for (lcv = vm_nphysseg - 1 ; lcv >= 0 ; lcv--)
#else
	for (lcv = 0 ; lcv < vm_nphysseg ; lcv++)
#endif
	{

		/* any room in this bank? */
		if (vm_physmem[lcv].avail_start >= vm_physmem[lcv].avail_end)
			continue;	/* nope */

		*paddrp = ptoa(vm_physmem[lcv].avail_start);
		vm_physmem[lcv].avail_start++;
		/* truncate! */
		vm_physmem[lcv].start = vm_physmem[lcv].avail_start;

		/* nothing left?   nuke it */
		if (vm_physmem[lcv].avail_start == vm_physmem[lcv].end) {
			if (vm_nphysseg == 1)
				panic("vm_page_physget: out of memory!");
			vm_nphysseg--;
			for (x = lcv ; x < vm_nphysseg ; x++)
				/* structure copy */
				vm_physmem[x] = vm_physmem[x+1];
		}
		return (TRUE);
	}

	return (FALSE);	/* whoops! */
}

boolean_t
uvm_page_physget(paddrp)
	paddr_t *paddrp;
{
	int i;

	/* try in the order of freelist preference */
	for (i = 0; i < VM_NFREELIST; i++)
		if (uvm_page_physget_freelist(paddrp, i) == TRUE)
			return (TRUE);
	return (FALSE);
}
#endif /* PMAP_STEAL_MEMORY */
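/*
 * editor's note: worked example of the pass 1 "matching end" rule above
 * (illustrative page frame numbers).  a segment with start == avail_start
 * == 0x100 and end == avail_end == 0x200 can be shrunk from the front:
 * the caller gets ptoa(0x100), and both start and avail_start advance to
 * 0x101, so no vm_page structure is ever needed for the stolen page.  a
 * segment whose avail range lies strictly inside [start,end) is skipped
 * in pass 1 and only shrunk (with the "truncate!" step) in pass 2.
 */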
/*
 * uvm_page_physload: load physical memory into VM system
 *
 * => all args are page frame numbers (PFs)
 * => all pages in start/end get vm_page structures
 * => areas marked by avail_start/avail_end get added to the free page pool
 * => we are limited to VM_PHYSSEG_MAX physical memory segments
 */

void
uvm_page_physload(start, end, avail_start, avail_end, free_list)
	paddr_t start, end, avail_start, avail_end;
	int free_list;
{
	int preload, lcv;
	psize_t npages;
	struct vm_page *pgs;
	struct vm_physseg *ps;

	if (uvmexp.pagesize == 0)
		panic("uvm_page_physload: page size not set!");

	if (free_list >= VM_NFREELIST || free_list < VM_FREELIST_DEFAULT)
		panic("uvm_page_physload: bad free list %d\n", free_list);

	if (start >= end)
		panic("uvm_page_physload: start >= end");

	/*
	 * do we have room?
	 */
	if (vm_nphysseg == VM_PHYSSEG_MAX) {
		printf("uvm_page_physload: unable to load physical memory "
		    "segment\n");
		printf("\t%d segments allocated, ignoring 0x%lx -> 0x%lx\n",
		    VM_PHYSSEG_MAX, start, end);
		return;
	}

	/*
	 * check to see if this is a "preload" (i.e. uvm_mem_init hasn't been
	 * called yet, so malloc is not available).
	 */
	for (lcv = 0 ; lcv < vm_nphysseg ; lcv++) {
		if (vm_physmem[lcv].pgs)
			break;
	}
	preload = (lcv == vm_nphysseg);

	/*
	 * if VM is already running, attempt to malloc() vm_page structures
	 */
	if (!preload) {
#if defined(VM_PHYSSEG_NOADD)
		panic("uvm_page_physload: tried to add RAM after vm_mem_init");
#else
		/* XXXCDC: need some sort of lockout for this case */
		paddr_t paddr;
		npages = end - start;	/* # of pages */
		MALLOC(pgs, struct vm_page *, sizeof(struct vm_page) * npages,
		    M_VMPAGE, M_NOWAIT);
		if (pgs == NULL) {
			printf("uvm_page_physload: can not malloc vm_page "
			    "structs for segment\n");
			printf("\tignoring 0x%lx -> 0x%lx\n", start, end);
			return;
		}
		/* zero data, init phys_addr and free_list, and free pages */
		memset(pgs, 0, sizeof(struct vm_page) * npages);
		for (lcv = 0, paddr = ptoa(start) ;
		    lcv < npages ; lcv++, paddr += PAGE_SIZE) {
			pgs[lcv].phys_addr = paddr;
			pgs[lcv].free_list = free_list;
			if (atop(paddr) >= avail_start &&
			    atop(paddr) <= avail_end)
				uvm_pagefree(&pgs[lcv]);
		}
		/* XXXCDC: incomplete: need to update uvmexp.free, what else? */
		/* XXXCDC: need hook to tell pmap to rebuild pv_list, etc... */
#endif
	} else {

		/* gcc complains if these don't get init'd */
		pgs = NULL;
		npages = 0;

	}

	/*
	 * now insert us in the proper place in vm_physmem[]
	 */

#if (VM_PHYSSEG_STRAT == VM_PSTRAT_RANDOM)

	/* random: put it at the end (easy!) */
	ps = &vm_physmem[vm_nphysseg];

#elif (VM_PHYSSEG_STRAT == VM_PSTRAT_BSEARCH)

	{
		int x;
		/* sort by address for binary search */
		for (lcv = 0 ; lcv < vm_nphysseg ; lcv++)
			if (start < vm_physmem[lcv].start)
				break;
		ps = &vm_physmem[lcv];
		/* move back other entries, if necessary ... */
		for (x = vm_nphysseg ; x > lcv ; x--)
			/* structure copy */
			vm_physmem[x] = vm_physmem[x - 1];
	}

#elif (VM_PHYSSEG_STRAT == VM_PSTRAT_BIGFIRST)

	{
		int x;
		/* sort by largest segment first */
		for (lcv = 0 ; lcv < vm_nphysseg ; lcv++)
			if ((end - start) >
			    (vm_physmem[lcv].end - vm_physmem[lcv].start))
				break;
		ps = &vm_physmem[lcv];
		/* move back other entries, if necessary ... */
		for (x = vm_nphysseg ; x > lcv ; x--)
			/* structure copy */
			vm_physmem[x] = vm_physmem[x - 1];
	}

#else

	panic("uvm_page_physload: unknown physseg strategy selected!");

#endif

	ps->start = start;
	ps->end = end;
	ps->avail_start = avail_start;
	ps->avail_end = avail_end;
	if (preload) {
		ps->pgs = NULL;
	} else {
		ps->pgs = pgs;
		ps->lastpg = pgs + npages - 1;
	}
	ps->free_list = free_list;
	vm_nphysseg++;

	/*
	 * done!
	 */

	if (!preload)
		uvm_page_rehash();

	return;
}
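/*
 * editor's note: illustrative sketch of an MD bootstrap call (not from
 * the original file; the numbers are made up).  a machine with RAM from
 * 0 to 64MB whose kernel occupies the first 2MB might register:
 *
 *	uvm_page_physload(atop(0), atop(64 * 1024 * 1024),
 *	    atop(2 * 1024 * 1024), atop(64 * 1024 * 1024),
 *	    VM_FREELIST_DEFAULT);
 *
 * all pages between start and end get vm_page structures, but only the
 * avail range actually lands in the free pool.
 */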
/*
 * uvm_page_rehash: reallocate hash table based on number of free pages.
 */

void
uvm_page_rehash()
{
	int freepages, lcv, bucketcount, s, oldcount;
	struct pglist *newbuckets, *oldbuckets;
	struct vm_page *pg;

	/*
	 * compute number of pages that can go in the free pool
	 */

	freepages = 0;
	for (lcv = 0 ; lcv < vm_nphysseg ; lcv++)
		freepages +=
		    (vm_physmem[lcv].avail_end - vm_physmem[lcv].avail_start);

	/*
	 * compute number of buckets needed for this number of pages
	 */

	bucketcount = 1;
	while (bucketcount < freepages)
		bucketcount = bucketcount * 2;

	/*
	 * malloc new buckets
	 */

	MALLOC(newbuckets, struct pglist *,
	    sizeof(struct pglist) * bucketcount, M_VMPBUCKET, M_NOWAIT);
	if (newbuckets == NULL) {
		printf("uvm_page_rehash: WARNING: could not grow page "
		    "hash table\n");
		return;
	}
	for (lcv = 0 ; lcv < bucketcount ; lcv++)
		TAILQ_INIT(&newbuckets[lcv]);

	/*
	 * now replace the old buckets with the new ones and rehash everything
	 */

	s = splimp();
	simple_lock(&uvm.hashlock);
	/* swap old for new ... */
	oldbuckets = uvm.page_hash;
	oldcount = uvm.page_nhash;
	uvm.page_hash = newbuckets;
	uvm.page_nhash = bucketcount;
	uvm.page_hashmask = bucketcount - 1;	/* power of 2 */

	/* ... and rehash */
	for (lcv = 0 ; lcv < oldcount ; lcv++) {
		while ((pg = oldbuckets[lcv].tqh_first) != NULL) {
			TAILQ_REMOVE(&oldbuckets[lcv], pg, hashq);
			TAILQ_INSERT_TAIL(
			    &uvm.page_hash[uvm_pagehash(pg->uobject, pg->offset)],
			    pg, hashq);
		}
	}
	simple_unlock(&uvm.hashlock);
	splx(s);

	/*
	 * free old bucket array if it is not the boot-time table
	 */

	if (oldbuckets != &uvm_bootbucket)
		FREE(oldbuckets, M_VMPBUCKET);

	/*
	 * done
	 */
	return;
}
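/*
 * editor's note: example of the sizing rule above (illustrative): with
 * 5000 free pages the doubling loop picks bucketcount = 8192, the first
 * power of two >= 5000, so uvm.page_hashmask becomes 0x1fff and the
 * hash function can mask instead of dividing.
 */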
#if 1 /* XXXCDC: TMP TMP TMP DEBUG DEBUG DEBUG */

void uvm_page_physdump __P((void)); /* SHUT UP GCC */

/* call from DDB */
void
uvm_page_physdump()
{
	int lcv;

	printf("rehash: physical memory config [segs=%d of %d]:\n",
	    vm_nphysseg, VM_PHYSSEG_MAX);
	for (lcv = 0 ; lcv < vm_nphysseg ; lcv++)
		printf("0x%lx->0x%lx [0x%lx->0x%lx]\n", vm_physmem[lcv].start,
		    vm_physmem[lcv].end, vm_physmem[lcv].avail_start,
		    vm_physmem[lcv].avail_end);
	printf("STRATEGY = ");
	switch (VM_PHYSSEG_STRAT) {
	case VM_PSTRAT_RANDOM: printf("RANDOM\n"); break;
	case VM_PSTRAT_BSEARCH: printf("BSEARCH\n"); break;
	case VM_PSTRAT_BIGFIRST: printf("BIGFIRST\n"); break;
	default: printf("<<UNKNOWN>>!!!!\n");
	}
	printf("number of buckets = %d\n", uvm.page_nhash);
}
#endif

/*
 * uvm_pagealloc_strat: allocate vm_page from a particular free list.
 *
 * => return null if no pages free
 * => wake up pagedaemon if number of free pages drops below low water mark
 * => if obj != NULL, obj must be locked (to put in hash)
 * => if anon != NULL, anon must be locked (to put in anon)
 * => only one of obj or anon can be non-null
 * => caller must activate/deactivate page if it is not wired.
 * => free_list is ignored if strat == UVM_PGA_STRAT_NORMAL.
 */

struct vm_page *
uvm_pagealloc_strat(obj, off, anon, flags, strat, free_list)
	struct uvm_object *obj;
	vaddr_t off;
	int flags;
	struct vm_anon *anon;
	int strat, free_list;
{
	int lcv, s;
	struct vm_page *pg;
	struct pglist *freeq;
	boolean_t use_reserve;

#ifdef DIAGNOSTIC
	/* sanity check */
	if (obj && anon)
		panic("uvm_pagealloc: obj and anon != NULL");
#endif

	s = uvm_lock_fpageq();		/* lock free page queue */

	/*
	 * check to see if we need to generate some free pages by waking
	 * the pagedaemon.
	 */

	if (uvmexp.free < uvmexp.freemin || (uvmexp.free < uvmexp.freetarg &&
	    uvmexp.inactive < uvmexp.inactarg))
		wakeup(&uvm.pagedaemon);

	/*
	 * fail if any of these conditions is true:
	 * [1]  there really are no free pages, or
	 * [2]  only kernel "reserved" pages remain and
	 *      the page isn't being allocated to a kernel object.
	 * [3]  only pagedaemon "reserved" pages remain and
	 *      the requestor isn't the pagedaemon.
	 */

	use_reserve = (flags & UVM_PGA_USERESERVE) ||
	    (obj && UVM_OBJ_IS_KERN_OBJECT(obj));
	if ((uvmexp.free <= uvmexp.reserve_kernel && !use_reserve) ||
	    (uvmexp.free <= uvmexp.reserve_pagedaemon &&
	     !(use_reserve && (curproc == uvm.pagedaemon_proc ||
	       curproc == syncerproc))))
		goto fail;

 again:
	switch (strat) {
	case UVM_PGA_STRAT_NORMAL:
		/* Check all freelists in descending priority order. */
		for (lcv = 0; lcv < VM_NFREELIST; lcv++) {
			freeq = &uvm.page_free[lcv];
			if ((pg = freeq->tqh_first) != NULL)
				goto gotit;
		}

		/* No pages free! */
		goto fail;

	case UVM_PGA_STRAT_ONLY:
	case UVM_PGA_STRAT_FALLBACK:
		/* Attempt to allocate from the specified free list. */
#ifdef DIAGNOSTIC
		if (free_list >= VM_NFREELIST || free_list < 0)
			panic("uvm_pagealloc_strat: bad free list %d",
			    free_list);
#endif
		freeq = &uvm.page_free[free_list];
		if ((pg = freeq->tqh_first) != NULL)
			goto gotit;

		/* Fall back, if possible. */
		if (strat == UVM_PGA_STRAT_FALLBACK) {
			strat = UVM_PGA_STRAT_NORMAL;
			goto again;
		}

		/* No pages free! */
		goto fail;

	default:
		panic("uvm_pagealloc_strat: bad strat %d", strat);
		/* NOTREACHED */
	}

 gotit:
	TAILQ_REMOVE(freeq, pg, pageq);
	uvmexp.free--;

	uvm_unlock_fpageq(s);		/* unlock free page queue */

	pg->offset = off;
	pg->uobject = obj;
	pg->uanon = anon;
	pg->flags = PG_BUSY|PG_CLEAN|PG_FAKE;
	pg->version++;
	pg->wire_count = 0;
	pg->loan_count = 0;
	if (anon) {
		anon->u.an_page = pg;
		pg->pqflags = PQ_ANON;
	} else {
		if (obj)
			uvm_pageinsert(pg);
		pg->pqflags = 0;
	}
#if defined(UVM_PAGE_TRKOWN)
	pg->owner_tag = NULL;
#endif
	UVM_PAGE_OWN(pg, "new alloc");

	return(pg);

 fail:
	uvm_unlock_fpageq(s);
	return (NULL);
}
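/*
 * editor's note: illustrative sketch of an allocation (not from the
 * original file; uobj and offset are stand-in variables).  a caller
 * holding uobj's lock gets back a busy, clean, fake page, falling back
 * to the normal strategy if its preferred free list is empty.
 */
#if 0	/* illustrative only */
	struct vm_page *pg;

	pg = uvm_pagealloc_strat(uobj, offset, NULL, 0,
	    UVM_PGA_STRAT_FALLBACK, VM_FREELIST_DEFAULT);
	if (pg == NULL) {
		/* out of memory: wait for the pagedaemon and retry */
	}
#endif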
/*
 * uvm_pagealloc_contig: allocate contiguous memory.
 *
 * XXX - fix comment.
 */

vaddr_t
uvm_pagealloc_contig(size, low, high, alignment)
	vaddr_t size;
	vaddr_t low, high;
	vaddr_t alignment;
{
	struct pglist pglist;
	struct vm_page *pg;
	vaddr_t addr, temp_addr;

	size = round_page(size);

	TAILQ_INIT(&pglist);
	if (uvm_pglistalloc(size, low, high, alignment, 0,
	    &pglist, 1, FALSE))
		return 0;
	addr = vm_map_min(kernel_map);
	if (uvm_map(kernel_map, &addr, size, NULL, UVM_UNKNOWN_OFFSET,
	    UVM_MAPFLAG(UVM_PROT_ALL, UVM_PROT_ALL, UVM_INH_NONE,
	    UVM_ADV_RANDOM, 0)) != KERN_SUCCESS) {
		uvm_pglistfree(&pglist);
		return 0;
	}
	temp_addr = addr;
	for (pg = TAILQ_FIRST(&pglist); pg != NULL;
	    pg = TAILQ_NEXT(pg, pageq)) {
		pg->uobject = uvm.kernel_object;
		pg->offset = temp_addr - vm_map_min(kernel_map);
		uvm_pageinsert(pg);
		uvm_pagewire(pg);
		pmap_kenter_pa(temp_addr, VM_PAGE_TO_PHYS(pg),
		    VM_PROT_READ|VM_PROT_WRITE);
		temp_addr += PAGE_SIZE;
	}
	return addr;
}

/*
 * uvm_pagerealloc: reallocate a page from one object to another
 *
 * => both objects must be locked
 */

void
uvm_pagerealloc(pg, newobj, newoff)
	struct vm_page *pg;
	struct uvm_object *newobj;
	vaddr_t newoff;
{
	/*
	 * remove it from the old object
	 */

	if (pg->uobject) {
		uvm_pageremove(pg);
	}

	/*
	 * put it in the new object
	 */

	if (newobj) {
		pg->uobject = newobj;
		pg->offset = newoff;
		pg->version++;
		uvm_pageinsert(pg);
	}

	return;
}


/*
 * uvm_pagefree: free page
 *
 * => erase page's identity (i.e. remove from hash/object)
 * => put page on free list
 * => caller must lock owning object (either anon or uvm_object)
 * => caller must lock page queues
 * => assumes all valid mappings of pg are gone
 */

void
uvm_pagefree(pg)
	struct vm_page *pg;
{
	int s;
	int saved_loan_count = pg->loan_count;

	/*
	 * if the page was an object page (and thus "TABLED"), remove it
	 * from the object.
	 */

	if (pg->flags & PG_TABLED) {

		/*
		 * if the object page is on loan we are going to drop ownership.
		 * it is possible that an anon will take over as owner for this
		 * page later on.  the anon will want a !PG_CLEAN page so that
		 * it knows it needs to allocate swap if it wants to page the
		 * page out.
		 */

		if (saved_loan_count)
			pg->flags &= ~PG_CLEAN; /* in case an anon takes over */

		uvm_pageremove(pg);

		/*
		 * if our page was on loan, then we just lost control over it
		 * (in fact, if it was loaned to an anon, the anon may have
		 * already taken over ownership of the page by now and thus
		 * changed the loan_count [e.g. in uvmfault_anonget()]) we just
		 * return (when the last loan is dropped, then the page can be
		 * freed by whatever was holding the last loan).
		 */
		if (saved_loan_count)
			return;

	} else if (saved_loan_count && (pg->pqflags & PQ_ANON)) {

		/*
		 * if our page is owned by an anon and is loaned out to the
		 * kernel then we just want to drop ownership and return.
		 * the kernel must free the page when all its loans clear ...
		 * note that the kernel can't change the loan status of our
		 * page as long as we are holding PQ lock.
		 */
		pg->pqflags &= ~PQ_ANON;
		pg->uanon = NULL;
		return;
	}

#ifdef DIAGNOSTIC
	if (saved_loan_count) {
		printf("uvm_pagefree: warning: freeing page with a loan "
		    "count of %d\n", saved_loan_count);
		panic("uvm_pagefree: loan count");
	}
#endif


	/*
	 * now remove the page from the queues
	 */

	if (pg->pqflags & PQ_ACTIVE) {
		TAILQ_REMOVE(&uvm.page_active, pg, pageq);
		pg->pqflags &= ~PQ_ACTIVE;
		uvmexp.active--;
	}
	if (pg->pqflags & PQ_INACTIVE) {
		if (pg->pqflags & PQ_SWAPBACKED)
			TAILQ_REMOVE(&uvm.page_inactive_swp, pg, pageq);
		else
			TAILQ_REMOVE(&uvm.page_inactive_obj, pg, pageq);
		pg->pqflags &= ~PQ_INACTIVE;
		uvmexp.inactive--;
	}

	/*
	 * if the page was wired, unwire it now.
	 */
	if (pg->wire_count) {
		pg->wire_count = 0;
		uvmexp.wired--;
	}

	/*
	 * and put on free queue
	 */

	s = uvm_lock_fpageq();
	TAILQ_INSERT_TAIL(&uvm.page_free[uvm_page_lookup_freelist(pg)],
	    pg, pageq);
	pg->pqflags = PQ_FREE;
#ifdef DEBUG
	pg->uobject = (void *)0xdeadbeef;
	pg->offset = 0xdeadbeef;
	pg->uanon = (void *)0xdeadbeef;
#endif
	uvmexp.free++;
	uvm_unlock_fpageq(s);
}
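/*
 * editor's note: illustrative sketch of the free protocol (not from the
 * original file).  a caller freeing an object page must already hold
 * the owning object's lock and the page queue lock, and must have
 * removed all mappings of the page first (e.g. via the pmap layer):
 */
#if 0	/* illustrative only */
	/* all mappings already removed, owning object already locked */
	uvm_lock_pageq();
	uvm_pagefree(pg);
	uvm_unlock_pageq();
#endif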
#if defined(UVM_PAGE_TRKOWN)
/*
 * uvm_page_own: set or release page ownership
 *
 * => this is a debugging function that keeps track of who sets PG_BUSY
 *    and where they do it.  it can be used to track down problems
 *    such as a process setting "PG_BUSY" and never releasing it.
 * => page's object [if any] must be locked
 * => if "tag" is NULL then we are releasing page ownership
 */
void
uvm_page_own(pg, tag)
	struct vm_page *pg;
	char *tag;
{
	/* gain ownership? */
	if (tag) {
		if (pg->owner_tag) {
			printf("uvm_page_own: page %p already owned "
			    "by proc %d [%s]\n", pg,
			    pg->owner, pg->owner_tag);
			panic("uvm_page_own");
		}
		pg->owner = (curproc) ? curproc->p_pid : (pid_t) -1;
		pg->owner_tag = tag;
		return;
	}

	/* drop ownership */
	if (pg->owner_tag == NULL) {
		printf("uvm_page_own: dropping ownership of a non-owned "
		    "page (%p)\n", pg);
		panic("uvm_page_own");
	}
	pg->owner_tag = NULL;
	return;
}
#endif
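/*
 * editor's note: illustrative pairing of the ownership hooks (not from
 * the original file).  code that sets PG_BUSY tags the page, and the
 * code that clears PG_BUSY drops the tag, so a stuck-busy page names
 * its culprit:
 */
#if 0	/* illustrative only */
	pg->flags |= PG_BUSY;
	UVM_PAGE_OWN(pg, "example_function");
	/* ... I/O or copy happens here ... */
	pg->flags &= ~PG_BUSY;
	UVM_PAGE_OWN(pg, NULL);
#endif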