vm_phys.c revision 285634

/*-
 * Copyright (c) 2002-2006 Rice University
 * Copyright (c) 2007 Alan L. Cox <alc@cs.rice.edu>
 * All rights reserved.
 *
 * This software was developed for the FreeBSD Project by Alan L. Cox,
 * Olivier Crameri, Peter Druschel, Sitaram Iyer, and Juan Navarro.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE COPYRIGHT
 * HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS
 * OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY
 * WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*
 *	Physical memory system implementation
 *
 * Any external functions defined by this module are only to be used by the
 * virtual memory system.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD: stable/10/sys/vm/vm_phys.c 285634 2015-07-16 14:41:58Z kib $");

#include "opt_ddb.h"
#include "opt_vm.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/lock.h>
#include <sys/kernel.h>
#include <sys/malloc.h>
#include <sys/mutex.h>
#if MAXMEMDOM > 1
#include <sys/proc.h>
#endif
#include <sys/queue.h>
#include <sys/sbuf.h>
#include <sys/sysctl.h>
#include <sys/vmmeter.h>

#include <ddb/ddb.h>

#include <vm/vm.h>
#include <vm/vm_param.h>
#include <vm/vm_kern.h>
#include <vm/vm_object.h>
#include <vm/vm_page.h>
#include <vm/vm_phys.h>

_Static_assert(sizeof(long) * NBBY >= VM_PHYSSEG_MAX,
    "Too many physsegs.");

struct mem_affinity *mem_affinity;

int vm_ndomains = 1;

struct vm_phys_seg vm_phys_segs[VM_PHYSSEG_MAX];
int vm_phys_nsegs;

#define	VM_PHYS_FICTITIOUS_NSEGS	8
static struct vm_phys_fictitious_seg {
	vm_paddr_t	start;
	vm_paddr_t	end;
	vm_page_t	first_page;
} vm_phys_fictitious_segs[VM_PHYS_FICTITIOUS_NSEGS];
static struct mtx vm_phys_fictitious_reg_mtx;
MALLOC_DEFINE(M_FICT_PAGES, "vm_fictitious", "Fictitious VM pages");

static struct vm_freelist
    vm_phys_free_queues[MAXMEMDOM][VM_NFREELIST][VM_NFREEPOOL][VM_NFREEORDER];

static int vm_nfreelists;

/*
 * Provides the mapping from VM_FREELIST_* to free list indices (flind).
 */
static int vm_freelist_to_flind[VM_NFREELIST];
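
/*
 * For illustration: on a configuration that ends up creating only the
 * default and LOWMEM lists, vm_phys_init()'s running-total pass below
 * assigns flind 0 to VM_FREELIST_DEFAULT and flind 1 to VM_FREELIST_LOWMEM,
 * and sets vm_nfreelists to 2.  Entries for lists that were never created
 * end up holding a neighboring list's index and are not meaningful.
 */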

CTASSERT(VM_FREELIST_DEFAULT == 0);

#ifdef VM_FREELIST_ISADMA
#define	VM_ISADMA_BOUNDARY	16777216
#endif
#ifdef VM_FREELIST_DMA32
#define	VM_DMA32_BOUNDARY	((vm_paddr_t)1 << 32)
#endif

/*
 * Enforce the assumptions made by vm_phys_add_seg() and vm_phys_init() about
 * the ordering of the free list boundaries.
 */
#if defined(VM_ISADMA_BOUNDARY) && defined(VM_LOWMEM_BOUNDARY)
CTASSERT(VM_ISADMA_BOUNDARY < VM_LOWMEM_BOUNDARY);
#endif
#if defined(VM_LOWMEM_BOUNDARY) && defined(VM_DMA32_BOUNDARY)
CTASSERT(VM_LOWMEM_BOUNDARY < VM_DMA32_BOUNDARY);
#endif

static int cnt_prezero;
SYSCTL_INT(_vm_stats_misc, OID_AUTO, cnt_prezero, CTLFLAG_RD,
    &cnt_prezero, 0, "The number of physical pages prezeroed at idle time");

static int sysctl_vm_phys_free(SYSCTL_HANDLER_ARGS);
SYSCTL_OID(_vm, OID_AUTO, phys_free, CTLTYPE_STRING | CTLFLAG_RD,
    NULL, 0, sysctl_vm_phys_free, "A", "Phys Free Info");

static int sysctl_vm_phys_segs(SYSCTL_HANDLER_ARGS);
SYSCTL_OID(_vm, OID_AUTO, phys_segs, CTLTYPE_STRING | CTLFLAG_RD,
    NULL, 0, sysctl_vm_phys_segs, "A", "Phys Seg Info");

SYSCTL_INT(_vm, OID_AUTO, ndomains, CTLFLAG_RD,
    &vm_ndomains, 0, "Number of physical memory domains available.");

static vm_page_t vm_phys_alloc_domain_pages(int domain, int flind, int pool,
    int order);
static void _vm_phys_create_seg(vm_paddr_t start, vm_paddr_t end, int domain);
static void vm_phys_create_seg(vm_paddr_t start, vm_paddr_t end);
static int vm_phys_paddr_to_segind(vm_paddr_t pa);
static void vm_phys_split_pages(vm_page_t m, int oind, struct vm_freelist *fl,
    int order);

static __inline int
vm_rr_selectdomain(void)
{
#if MAXMEMDOM > 1
	struct thread *td;

	td = curthread;

	td->td_dom_rr_idx++;
	td->td_dom_rr_idx %= vm_ndomains;
	return (td->td_dom_rr_idx);
#else
	return (0);
#endif
}

boolean_t
vm_phys_domain_intersects(long mask, vm_paddr_t low, vm_paddr_t high)
{
	struct vm_phys_seg *s;
	int idx;

	while ((idx = ffsl(mask)) != 0) {
		idx--;	/* ffsl counts from 1 */
		mask &= ~(1UL << idx);
		s = &vm_phys_segs[idx];
		if (low < s->end && high > s->start)
			return (TRUE);
	}
	return (FALSE);
}
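
/*
 * In the mask passed to vm_phys_domain_intersects(), bit i stands for
 * vm_phys_segs[i].  For example, if segment 0 covers [0, 16MB) and
 * segment 1 covers [16MB, 4GB), then a mask of 0x2 with low = 0 and
 * high = 1MB yields FALSE: the range [0, 1MB) overlaps segment 0 only,
 * and segment 0 is excluded from the mask.
 */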

/*
 * Outputs the state of the physical memory allocator, specifically,
 * the amount of physical memory in each free list.
 */
static int
sysctl_vm_phys_free(SYSCTL_HANDLER_ARGS)
{
	struct sbuf sbuf;
	struct vm_freelist *fl;
	int dom, error, flind, oind, pind;

	error = sysctl_wire_old_buffer(req, 0);
	if (error != 0)
		return (error);
	sbuf_new_for_sysctl(&sbuf, NULL, 128 * vm_ndomains, req);
	for (dom = 0; dom < vm_ndomains; dom++) {
		sbuf_printf(&sbuf, "\nDOMAIN %d:\n", dom);
		for (flind = 0; flind < vm_nfreelists; flind++) {
			sbuf_printf(&sbuf, "\nFREE LIST %d:\n"
			    "\n  ORDER (SIZE)  |  NUMBER"
			    "\n              ", flind);
			for (pind = 0; pind < VM_NFREEPOOL; pind++)
				sbuf_printf(&sbuf, "  |  POOL %d", pind);
			sbuf_printf(&sbuf, "\n--            ");
			for (pind = 0; pind < VM_NFREEPOOL; pind++)
				sbuf_printf(&sbuf, "-- --      ");
			sbuf_printf(&sbuf, "--\n");
			for (oind = VM_NFREEORDER - 1; oind >= 0; oind--) {
				sbuf_printf(&sbuf, "  %2d (%6dK)", oind,
				    1 << (PAGE_SHIFT - 10 + oind));
				for (pind = 0; pind < VM_NFREEPOOL; pind++) {
					fl = vm_phys_free_queues[dom][flind][pind];
					sbuf_printf(&sbuf, "  |  %6d",
					    fl[oind].lcnt);
				}
				sbuf_printf(&sbuf, "\n");
			}
		}
	}
	error = sbuf_finish(&sbuf);
	sbuf_delete(&sbuf);
	return (error);
}

/*
 * Outputs the set of physical memory segments.
 */
static int
sysctl_vm_phys_segs(SYSCTL_HANDLER_ARGS)
{
	struct sbuf sbuf;
	struct vm_phys_seg *seg;
	int error, segind;

	error = sysctl_wire_old_buffer(req, 0);
	if (error != 0)
		return (error);
	sbuf_new_for_sysctl(&sbuf, NULL, 128, req);
	for (segind = 0; segind < vm_phys_nsegs; segind++) {
		sbuf_printf(&sbuf, "\nSEGMENT %d:\n\n", segind);
		seg = &vm_phys_segs[segind];
		sbuf_printf(&sbuf, "start:     %#jx\n",
		    (uintmax_t)seg->start);
		sbuf_printf(&sbuf, "end:       %#jx\n",
		    (uintmax_t)seg->end);
		sbuf_printf(&sbuf, "domain:    %d\n", seg->domain);
		sbuf_printf(&sbuf, "free list: %p\n", seg->free_queues);
	}
	error = sbuf_finish(&sbuf);
	sbuf_delete(&sbuf);
	return (error);
}

static void
vm_freelist_add(struct vm_freelist *fl, vm_page_t m, int order, int tail)
{

	m->order = order;
	if (tail)
		TAILQ_INSERT_TAIL(&fl[order].pl, m, plinks.q);
	else
		TAILQ_INSERT_HEAD(&fl[order].pl, m, plinks.q);
	fl[order].lcnt++;
}

static void
vm_freelist_rem(struct vm_freelist *fl, vm_page_t m, int order)
{

	TAILQ_REMOVE(&fl[order].pl, m, plinks.q);
	fl[order].lcnt--;
	m->order = VM_NFREEORDER;
}

/*
 * Create a physical memory segment.
 */
static void
_vm_phys_create_seg(vm_paddr_t start, vm_paddr_t end, int domain)
{
	struct vm_phys_seg *seg;

	KASSERT(vm_phys_nsegs < VM_PHYSSEG_MAX,
	    ("vm_phys_create_seg: increase VM_PHYSSEG_MAX"));
	KASSERT(domain < vm_ndomains,
	    ("vm_phys_create_seg: invalid domain provided"));
	seg = &vm_phys_segs[vm_phys_nsegs++];
	while (seg > vm_phys_segs && (seg - 1)->start >= end) {
		*seg = *(seg - 1);
		seg--;
	}
	seg->start = start;
	seg->end = end;
	seg->domain = domain;
}

static void
vm_phys_create_seg(vm_paddr_t start, vm_paddr_t end)
{
	int i;

	if (mem_affinity == NULL) {
		_vm_phys_create_seg(start, end, 0);
		return;
	}

	for (i = 0;; i++) {
		if (mem_affinity[i].end == 0)
			panic("Reached end of affinity info");
		if (mem_affinity[i].end <= start)
			continue;
		if (mem_affinity[i].start > start)
			panic("No affinity info for start %jx",
			    (uintmax_t)start);
		if (mem_affinity[i].end >= end) {
			_vm_phys_create_seg(start, end,
			    mem_affinity[i].domain);
			break;
		}
		_vm_phys_create_seg(start, mem_affinity[i].end,
		    mem_affinity[i].domain);
		start = mem_affinity[i].end;
	}
}

/*
 * Add a physical memory segment.
 */
void
vm_phys_add_seg(vm_paddr_t start, vm_paddr_t end)
{
	vm_paddr_t paddr;

	KASSERT((start & PAGE_MASK) == 0,
	    ("vm_phys_define_seg: start is not page aligned"));
	KASSERT((end & PAGE_MASK) == 0,
	    ("vm_phys_define_seg: end is not page aligned"));

	/*
	 * Split the physical memory segment if it spans two or more free
	 * list boundaries.
	 */
	paddr = start;
#ifdef VM_FREELIST_ISADMA
	if (paddr < VM_ISADMA_BOUNDARY && end > VM_ISADMA_BOUNDARY) {
		vm_phys_create_seg(paddr, VM_ISADMA_BOUNDARY);
		paddr = VM_ISADMA_BOUNDARY;
	}
#endif
#ifdef VM_FREELIST_LOWMEM
	if (paddr < VM_LOWMEM_BOUNDARY && end > VM_LOWMEM_BOUNDARY) {
		vm_phys_create_seg(paddr, VM_LOWMEM_BOUNDARY);
		paddr = VM_LOWMEM_BOUNDARY;
	}
#endif
#ifdef VM_FREELIST_DMA32
	if (paddr < VM_DMA32_BOUNDARY && end > VM_DMA32_BOUNDARY) {
		vm_phys_create_seg(paddr, VM_DMA32_BOUNDARY);
		paddr = VM_DMA32_BOUNDARY;
	}
#endif
	vm_phys_create_seg(paddr, end);
}
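
/*
 * For example, on a hypothetical configuration that defines both
 * VM_FREELIST_ISADMA and VM_FREELIST_DMA32 (boundaries at 16MB and 4GB,
 * per the definitions above), a single firmware-reported range [8MB, 6GB)
 * would be registered as three segments: [8MB, 16MB), [16MB, 4GB), and
 * [4GB, 6GB).  Each piece then falls entirely within one free list.
 */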

/*
 * Initialize the physical memory allocator.
 *
 * Requires that vm_page_array is initialized!
 */
void
vm_phys_init(void)
{
	struct vm_freelist *fl;
	struct vm_phys_seg *seg;
	u_long npages;
	int dom, flind, freelist, oind, pind, segind;

	/*
	 * Compute the number of free lists, and generate the mapping from the
	 * manifest constants VM_FREELIST_* to the free list indices.
	 *
	 * Initially, the entries of vm_freelist_to_flind[] are set to either
	 * 0 or 1 to indicate which free lists should be created.
	 */
	npages = 0;
	for (segind = vm_phys_nsegs - 1; segind >= 0; segind--) {
		seg = &vm_phys_segs[segind];
#ifdef VM_FREELIST_ISADMA
		if (seg->end <= VM_ISADMA_BOUNDARY)
			vm_freelist_to_flind[VM_FREELIST_ISADMA] = 1;
		else
#endif
#ifdef VM_FREELIST_LOWMEM
		if (seg->end <= VM_LOWMEM_BOUNDARY)
			vm_freelist_to_flind[VM_FREELIST_LOWMEM] = 1;
		else
#endif
#ifdef VM_FREELIST_DMA32
		if (
#ifdef VM_DMA32_NPAGES_THRESHOLD
		    /*
		     * Create the DMA32 free list only if the amount of
		     * physical memory above physical address 4G exceeds the
		     * given threshold.
		     */
		    npages > VM_DMA32_NPAGES_THRESHOLD &&
#endif
		    seg->end <= VM_DMA32_BOUNDARY)
			vm_freelist_to_flind[VM_FREELIST_DMA32] = 1;
		else
#endif
		{
			npages += atop(seg->end - seg->start);
			vm_freelist_to_flind[VM_FREELIST_DEFAULT] = 1;
		}
	}
	/* Change each entry into a running total of the free lists. */
	for (freelist = 1; freelist < VM_NFREELIST; freelist++) {
		vm_freelist_to_flind[freelist] +=
		    vm_freelist_to_flind[freelist - 1];
	}
	vm_nfreelists = vm_freelist_to_flind[VM_NFREELIST - 1];
	KASSERT(vm_nfreelists > 0, ("vm_phys_init: no free lists"));
	/* Change each entry into a free list index. */
	for (freelist = 0; freelist < VM_NFREELIST; freelist++)
		vm_freelist_to_flind[freelist]--;

	/*
	 * Initialize the first_page and free_queues fields of each physical
	 * memory segment.
	 */
#ifdef VM_PHYSSEG_SPARSE
	npages = 0;
#endif
	for (segind = 0; segind < vm_phys_nsegs; segind++) {
		seg = &vm_phys_segs[segind];
#ifdef VM_PHYSSEG_SPARSE
		seg->first_page = &vm_page_array[npages];
		npages += atop(seg->end - seg->start);
#else
		seg->first_page = PHYS_TO_VM_PAGE(seg->start);
#endif
#ifdef VM_FREELIST_ISADMA
		if (seg->end <= VM_ISADMA_BOUNDARY) {
			flind = vm_freelist_to_flind[VM_FREELIST_ISADMA];
			KASSERT(flind >= 0,
			    ("vm_phys_init: ISADMA flind < 0"));
		} else
#endif
#ifdef VM_FREELIST_LOWMEM
		if (seg->end <= VM_LOWMEM_BOUNDARY) {
			flind = vm_freelist_to_flind[VM_FREELIST_LOWMEM];
			KASSERT(flind >= 0,
			    ("vm_phys_init: LOWMEM flind < 0"));
		} else
#endif
#ifdef VM_FREELIST_DMA32
		if (seg->end <= VM_DMA32_BOUNDARY) {
			flind = vm_freelist_to_flind[VM_FREELIST_DMA32];
			KASSERT(flind >= 0,
			    ("vm_phys_init: DMA32 flind < 0"));
		} else
#endif
		{
			flind = vm_freelist_to_flind[VM_FREELIST_DEFAULT];
			KASSERT(flind >= 0,
			    ("vm_phys_init: DEFAULT flind < 0"));
		}
		seg->free_queues = &vm_phys_free_queues[seg->domain][flind];
	}

	/*
	 * Initialize the free queues.
	 */
	for (dom = 0; dom < vm_ndomains; dom++) {
		for (flind = 0; flind < vm_nfreelists; flind++) {
			for (pind = 0; pind < VM_NFREEPOOL; pind++) {
				fl = vm_phys_free_queues[dom][flind][pind];
				for (oind = 0; oind < VM_NFREEORDER; oind++)
					TAILQ_INIT(&fl[oind].pl);
			}
		}
	}
	mtx_init(&vm_phys_fictitious_reg_mtx, "vmfctr", NULL, MTX_DEF);
}
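
/*
 * Note on the free_queues type: seg->free_queues points at one
 * [VM_NFREEPOOL][VM_NFREEORDER] slice of vm_phys_free_queues, so
 * (*seg->free_queues)[pool] is the array of per-order queues for "pool"
 * within the segment's domain and free list, and
 * (*seg->free_queues)[pool][order].pl is one page list.
 */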

/*
 * Split a contiguous, power of two-sized set of physical pages.
 */
static __inline void
vm_phys_split_pages(vm_page_t m, int oind, struct vm_freelist *fl, int order)
{
	vm_page_t m_buddy;

	while (oind > order) {
		oind--;
		m_buddy = &m[1 << oind];
		KASSERT(m_buddy->order == VM_NFREEORDER,
		    ("vm_phys_split_pages: page %p has unexpected order %d",
		    m_buddy, m_buddy->order));
		vm_freelist_add(fl, m_buddy, oind, 0);
	}
}
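
/*
 * For example, splitting an order 3 block (8 pages) down to order 0 frees
 * the upper halves at successively smaller orders: m[4] is queued at
 * order 2, m[2] at order 1, and m[1] at order 0, leaving the caller with
 * the single page m.
 */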

/*
 * Initialize a physical page and add it to the free lists.
 */
void
vm_phys_add_page(vm_paddr_t pa)
{
	vm_page_t m;
	struct vm_domain *vmd;

	cnt.v_page_count++;
	m = vm_phys_paddr_to_vm_page(pa);
	m->phys_addr = pa;
	m->queue = PQ_NONE;
	m->segind = vm_phys_paddr_to_segind(pa);
	vmd = vm_phys_domain(m);
	vmd->vmd_page_count++;
	vmd->vmd_segs |= 1UL << m->segind;
	m->flags = PG_FREE;
	KASSERT(m->order == VM_NFREEORDER,
	    ("vm_phys_add_page: page %p has unexpected order %d",
	    m, m->order));
	m->pool = VM_FREEPOOL_DEFAULT;
	pmap_page_init(m);
	mtx_lock(&vm_page_queue_free_mtx);
	vm_phys_freecnt_adj(m, 1);
	vm_phys_free_pages(m, 0);
	mtx_unlock(&vm_page_queue_free_mtx);
}

/*
 * Allocate a contiguous, power of two-sized set of physical pages
 * from the free lists.
 *
 * The free page queues must be locked.
 */
vm_page_t
vm_phys_alloc_pages(int pool, int order)
{
	vm_page_t m;
	int dom, domain, flind;

	KASSERT(pool < VM_NFREEPOOL,
	    ("vm_phys_alloc_pages: pool %d is out of range", pool));
	KASSERT(order < VM_NFREEORDER,
	    ("vm_phys_alloc_pages: order %d is out of range", order));

	for (dom = 0; dom < vm_ndomains; dom++) {
		domain = vm_rr_selectdomain();
		for (flind = 0; flind < vm_nfreelists; flind++) {
			m = vm_phys_alloc_domain_pages(domain, flind, pool,
			    order);
			if (m != NULL)
				return (m);
		}
	}
	return (NULL);
}

/*
 * Allocate a contiguous, power of two-sized set of physical pages from the
 * specified free list.  The free list must be specified using one of the
 * manifest constants VM_FREELIST_*.
 *
 * The free page queues must be locked.
 */
vm_page_t
vm_phys_alloc_freelist_pages(int freelist, int pool, int order)
{
	vm_page_t m;
	int dom, domain;

	KASSERT(freelist < VM_NFREELIST,
	    ("vm_phys_alloc_freelist_pages: freelist %d is out of range",
	    freelist));
	KASSERT(pool < VM_NFREEPOOL,
	    ("vm_phys_alloc_freelist_pages: pool %d is out of range", pool));
	KASSERT(order < VM_NFREEORDER,
	    ("vm_phys_alloc_freelist_pages: order %d is out of range", order));
	for (dom = 0; dom < vm_ndomains; dom++) {
		domain = vm_rr_selectdomain();
		m = vm_phys_alloc_domain_pages(domain,
		    vm_freelist_to_flind[freelist], pool, order);
		if (m != NULL)
			return (m);
	}
	return (NULL);
}

static vm_page_t
vm_phys_alloc_domain_pages(int domain, int flind, int pool, int order)
{
	struct vm_freelist *fl;
	struct vm_freelist *alt;
	int oind, pind;
	vm_page_t m;

	mtx_assert(&vm_page_queue_free_mtx, MA_OWNED);
	fl = &vm_phys_free_queues[domain][flind][pool][0];
	for (oind = order; oind < VM_NFREEORDER; oind++) {
		m = TAILQ_FIRST(&fl[oind].pl);
		if (m != NULL) {
			vm_freelist_rem(fl, m, oind);
			vm_phys_split_pages(m, oind, fl, order);
			return (m);
		}
	}

	/*
	 * The given pool was empty.  Find the largest
	 * contiguous, power-of-two-sized set of pages in any
	 * pool.  Transfer these pages to the given pool, and
	 * use them to satisfy the allocation.
	 */
	for (oind = VM_NFREEORDER - 1; oind >= order; oind--) {
		for (pind = 0; pind < VM_NFREEPOOL; pind++) {
			alt = &vm_phys_free_queues[domain][flind][pind][0];
			m = TAILQ_FIRST(&alt[oind].pl);
			if (m != NULL) {
				vm_freelist_rem(alt, m, oind);
				vm_phys_set_pool(pool, m, oind);
				vm_phys_split_pages(m, oind, fl, order);
				return (m);
			}
		}
	}
	return (NULL);
}
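
/*
 * A caller holding the free queue lock obtains 2^order contiguous pages,
 * e.g. vm_phys_alloc_pages(VM_FREEPOOL_DEFAULT, 3) returns the first of 8
 * physically contiguous pages, or NULL if no free list in any domain can
 * satisfy the request even after stealing blocks from other pools.
 */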

/*
 * Find the vm_page corresponding to the given physical address.
 */
vm_page_t
vm_phys_paddr_to_vm_page(vm_paddr_t pa)
{
	struct vm_phys_seg *seg;
	int segind;

	for (segind = 0; segind < vm_phys_nsegs; segind++) {
		seg = &vm_phys_segs[segind];
		if (pa >= seg->start && pa < seg->end)
			return (&seg->first_page[atop(pa - seg->start)]);
	}
	return (NULL);
}

vm_page_t
vm_phys_fictitious_to_vm_page(vm_paddr_t pa)
{
	struct vm_phys_fictitious_seg *seg;
	vm_page_t m;
	int segind;

	m = NULL;
	for (segind = 0; segind < VM_PHYS_FICTITIOUS_NSEGS; segind++) {
		seg = &vm_phys_fictitious_segs[segind];
		if (pa >= seg->start && pa < seg->end) {
			m = &seg->first_page[atop(pa - seg->start)];
			KASSERT((m->flags & PG_FICTITIOUS) != 0,
			    ("%p not fictitious", m));
			break;
		}
	}
	return (m);
}

int
vm_phys_fictitious_reg_range(vm_paddr_t start, vm_paddr_t end,
    vm_memattr_t memattr)
{
	struct vm_phys_fictitious_seg *seg;
	vm_page_t fp;
	long i, page_count;
	int segind;
#ifdef VM_PHYSSEG_DENSE
	long pi;
	boolean_t malloced;
#endif

	page_count = (end - start) / PAGE_SIZE;

#ifdef VM_PHYSSEG_DENSE
	pi = atop(start);
	if (pi >= first_page && pi < vm_page_array_size + first_page) {
		if (atop(end) >= vm_page_array_size + first_page)
			return (EINVAL);
		fp = &vm_page_array[pi - first_page];
		malloced = FALSE;
	} else
#endif
	{
		fp = malloc(page_count * sizeof(struct vm_page), M_FICT_PAGES,
		    M_WAITOK | M_ZERO);
#ifdef VM_PHYSSEG_DENSE
		malloced = TRUE;
#endif
	}
	for (i = 0; i < page_count; i++) {
		vm_page_initfake(&fp[i], start + PAGE_SIZE * i, memattr);
		fp[i].oflags &= ~VPO_UNMANAGED;
		fp[i].busy_lock = VPB_UNBUSIED;
	}
	mtx_lock(&vm_phys_fictitious_reg_mtx);
	for (segind = 0; segind < VM_PHYS_FICTITIOUS_NSEGS; segind++) {
		seg = &vm_phys_fictitious_segs[segind];
		if (seg->start == 0 && seg->end == 0) {
			seg->start = start;
			seg->end = end;
			seg->first_page = fp;
			mtx_unlock(&vm_phys_fictitious_reg_mtx);
			return (0);
		}
	}
	mtx_unlock(&vm_phys_fictitious_reg_mtx);
#ifdef VM_PHYSSEG_DENSE
	if (malloced)
#endif
		free(fp, M_FICT_PAGES);
	return (EBUSY);
}

void
vm_phys_fictitious_unreg_range(vm_paddr_t start, vm_paddr_t end)
{
	struct vm_phys_fictitious_seg *seg;
	vm_page_t fp;
	int segind;
#ifdef VM_PHYSSEG_DENSE
	long pi;
#endif

#ifdef VM_PHYSSEG_DENSE
	pi = atop(start);
#endif

	mtx_lock(&vm_phys_fictitious_reg_mtx);
	for (segind = 0; segind < VM_PHYS_FICTITIOUS_NSEGS; segind++) {
		seg = &vm_phys_fictitious_segs[segind];
		if (seg->start == start && seg->end == end) {
			seg->start = seg->end = 0;
			fp = seg->first_page;
			seg->first_page = NULL;
			mtx_unlock(&vm_phys_fictitious_reg_mtx);
#ifdef VM_PHYSSEG_DENSE
			/*
			 * Free fp only if it was malloc()ed above, i.e. only
			 * if the range lies outside vm_page_array.  This is
			 * the exact complement of the reuse condition in
			 * vm_phys_fictitious_reg_range().
			 */
			if (pi < first_page ||
			    atop(end) >= vm_page_array_size + first_page)
#endif
				free(fp, M_FICT_PAGES);
			return;
		}
	}
	mtx_unlock(&vm_phys_fictitious_reg_mtx);
	KASSERT(0,
	    ("Unregistering a fictitious range that was never registered"));
}

/*
 * Find the segment containing the given physical address.
 */
static int
vm_phys_paddr_to_segind(vm_paddr_t pa)
{
	struct vm_phys_seg *seg;
	int segind;

	for (segind = 0; segind < vm_phys_nsegs; segind++) {
		seg = &vm_phys_segs[segind];
		if (pa >= seg->start && pa < seg->end)
			return (segind);
	}
	panic("vm_phys_paddr_to_segind: paddr %#jx is not in any segment",
	    (uintmax_t)pa);
}

/*
 * Free a contiguous, power of two-sized set of physical pages.
 *
 * The free page queues must be locked.
 */
void
vm_phys_free_pages(vm_page_t m, int order)
{
	struct vm_freelist *fl;
	struct vm_phys_seg *seg;
	vm_paddr_t pa;
	vm_page_t m_buddy;

	KASSERT(m->order == VM_NFREEORDER,
	    ("vm_phys_free_pages: page %p has unexpected order %d",
	    m, m->order));
	KASSERT(m->pool < VM_NFREEPOOL,
	    ("vm_phys_free_pages: page %p has unexpected pool %d",
	    m, m->pool));
	KASSERT(order < VM_NFREEORDER,
	    ("vm_phys_free_pages: order %d is out of range", order));
	mtx_assert(&vm_page_queue_free_mtx, MA_OWNED);
	seg = &vm_phys_segs[m->segind];
	if (order < VM_NFREEORDER - 1) {
		pa = VM_PAGE_TO_PHYS(m);
		do {
			pa ^= ((vm_paddr_t)1 << (PAGE_SHIFT + order));
			if (pa < seg->start || pa >= seg->end)
				break;
			m_buddy = &seg->first_page[atop(pa - seg->start)];
			if (m_buddy->order != order)
				break;
			fl = (*seg->free_queues)[m_buddy->pool];
			vm_freelist_rem(fl, m_buddy, order);
			if (m_buddy->pool != m->pool)
				vm_phys_set_pool(m->pool, m_buddy, order);
			order++;
			pa &= ~(((vm_paddr_t)1 << (PAGE_SHIFT + order)) - 1);
			m = &seg->first_page[atop(pa - seg->start)];
		} while (order < VM_NFREEORDER - 1);
	}
	fl = (*seg->free_queues)[m->pool];
	vm_freelist_add(fl, m, order, 1);
}
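
/*
 * The buddy of an order "order" block is found by flipping the single
 * address bit at PAGE_SHIFT + order.  For example, with 4KB pages, the
 * order 0 page at 0x5000 has its buddy at 0x5000 ^ 0x1000 = 0x4000; if
 * that buddy is free, the pair merges into the order 1 block at 0x4000,
 * whose own buddy is then 0x4000 ^ 0x2000 = 0x6000, and so on.
 */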

/*
 * Free a contiguous, arbitrarily sized set of physical pages.
 *
 * The free page queues must be locked.
 */
void
vm_phys_free_contig(vm_page_t m, u_long npages)
{
	u_int n;
	int order;

	/*
	 * Avoid unnecessary coalescing by freeing the pages in the largest
	 * possible power-of-two-sized subsets.
	 */
	mtx_assert(&vm_page_queue_free_mtx, MA_OWNED);
	for (;; npages -= n) {
		/*
		 * Unsigned "min" is used here so that "order" is assigned
		 * "VM_NFREEORDER - 1" when "m"'s physical address is zero
		 * or the low-order bits of its physical address are zero
		 * because the size of a physical address exceeds the size of
		 * a long.
		 */
		order = min(ffsl(VM_PAGE_TO_PHYS(m) >> PAGE_SHIFT) - 1,
		    VM_NFREEORDER - 1);
		n = 1 << order;
		if (npages < n)
			break;
		vm_phys_free_pages(m, order);
		m += n;
	}
	/* The residual "npages" is less than "1 << (VM_NFREEORDER - 1)". */
	for (; npages > 0; npages -= n) {
		order = flsl(npages) - 1;
		n = 1 << order;
		vm_phys_free_pages(m, order);
		m += n;
	}
}
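
/*
 * For example, freeing 37 pages starting at page frame 48 proceeds as
 * follows: frame 48 is aligned to 16 pages (ffsl(48) - 1 == 4), so an
 * order 4 block is freed first; frame 64 is aligned to 64 pages, which
 * exceeds the remaining 21, so the first loop stops and the residue is
 * freed largest-first as orders 4, 2, and 0 (16 + 4 + 1 = 21).
 */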

/*
 * Set the pool for a contiguous, power of two-sized set of physical pages.
 */
void
vm_phys_set_pool(int pool, vm_page_t m, int order)
{
	vm_page_t m_tmp;

	for (m_tmp = m; m_tmp < &m[1 << order]; m_tmp++)
		m_tmp->pool = pool;
}

/*
 * Search for the given physical page "m" in the free lists.  If the search
 * succeeds, remove "m" from the free lists and return TRUE.  Otherwise,
 * return FALSE, indicating that "m" is not in the free lists.
 *
 * The free page queues must be locked.
 */
boolean_t
vm_phys_unfree_page(vm_page_t m)
{
	struct vm_freelist *fl;
	struct vm_phys_seg *seg;
	vm_paddr_t pa, pa_half;
	vm_page_t m_set, m_tmp;
	int order;

	mtx_assert(&vm_page_queue_free_mtx, MA_OWNED);

	/*
	 * First, find the contiguous, power of two-sized set of free
	 * physical pages containing the given physical page "m" and
	 * assign it to "m_set".
	 */
	seg = &vm_phys_segs[m->segind];
	for (m_set = m, order = 0; m_set->order == VM_NFREEORDER &&
	    order < VM_NFREEORDER - 1; ) {
		order++;
		pa = m->phys_addr & (~(vm_paddr_t)0 << (PAGE_SHIFT + order));
		if (pa >= seg->start)
			m_set = &seg->first_page[atop(pa - seg->start)];
		else
			return (FALSE);
	}
	if (m_set->order < order)
		return (FALSE);
	if (m_set->order == VM_NFREEORDER)
		return (FALSE);
	KASSERT(m_set->order < VM_NFREEORDER,
	    ("vm_phys_unfree_page: page %p has unexpected order %d",
	    m_set, m_set->order));

	/*
	 * Next, remove "m_set" from the free lists.  Finally, extract
	 * "m" from "m_set" using an iterative algorithm: While "m_set"
	 * is larger than a page, shrink "m_set" by returning the half
	 * of "m_set" that does not contain "m" to the free lists.
	 */
	fl = (*seg->free_queues)[m_set->pool];
	order = m_set->order;
	vm_freelist_rem(fl, m_set, order);
	while (order > 0) {
		order--;
		pa_half = m_set->phys_addr ^
		    ((vm_paddr_t)1 << (PAGE_SHIFT + order));
		if (m->phys_addr < pa_half)
			m_tmp = &seg->first_page[atop(pa_half - seg->start)];
		else {
			m_tmp = m_set;
			m_set = &seg->first_page[atop(pa_half - seg->start)];
		}
		vm_freelist_add(fl, m_tmp, order, 0);
	}
	KASSERT(m_set == m, ("vm_phys_unfree_page: fatal inconsistency"));
	return (TRUE);
}

/*
 * Try to zero one physical page.  Used by an idle priority thread.
 */
boolean_t
vm_phys_zero_pages_idle(void)
{
	static struct vm_freelist *fl;
	static int flind, oind, pind;
	vm_page_t m, m_tmp;
	int domain;

	domain = vm_rr_selectdomain();
	fl = vm_phys_free_queues[domain][0][0];
	mtx_assert(&vm_page_queue_free_mtx, MA_OWNED);
	for (;;) {
		TAILQ_FOREACH_REVERSE(m, &fl[oind].pl, pglist, plinks.q) {
			for (m_tmp = m; m_tmp < &m[1 << oind]; m_tmp++) {
				if ((m_tmp->flags & (PG_CACHED | PG_ZERO)) == 0) {
					vm_phys_unfree_page(m_tmp);
					vm_phys_freecnt_adj(m, -1);
					mtx_unlock(&vm_page_queue_free_mtx);
					pmap_zero_page_idle(m_tmp);
					m_tmp->flags |= PG_ZERO;
					mtx_lock(&vm_page_queue_free_mtx);
					vm_phys_freecnt_adj(m, 1);
					vm_phys_free_pages(m_tmp, 0);
					vm_page_zero_count++;
					cnt_prezero++;
					return (TRUE);
				}
			}
		}
		oind++;
		if (oind == VM_NFREEORDER) {
			oind = 0;
			pind++;
			if (pind == VM_NFREEPOOL) {
				pind = 0;
				flind++;
				if (flind == vm_nfreelists)
					flind = 0;
			}
			fl = vm_phys_free_queues[domain][flind][pind];
		}
	}
}

/*
 * Allocate a contiguous set of physical pages of the given size
 * "npages" from the free lists.  All of the physical pages must be at
 * or above the given physical address "low" and below the given
 * physical address "high".  The given value "alignment" determines the
 * alignment of the first physical page in the set.  If the given value
 * "boundary" is non-zero, then the set of physical pages cannot cross
 * any physical address boundary that is a multiple of that value.  Both
 * "alignment" and "boundary" must be a power of two.
 */
vm_page_t
vm_phys_alloc_contig(u_long npages, vm_paddr_t low, vm_paddr_t high,
    u_long alignment, vm_paddr_t boundary)
{
	struct vm_freelist *fl;
	struct vm_phys_seg *seg;
	vm_paddr_t pa, pa_last, size;
	vm_page_t m, m_ret;
	u_long npages_end;
	int dom, domain, flind, oind, order, pind;

	mtx_assert(&vm_page_queue_free_mtx, MA_OWNED);
	size = npages << PAGE_SHIFT;
	KASSERT(size != 0,
	    ("vm_phys_alloc_contig: size must not be 0"));
	KASSERT((alignment & (alignment - 1)) == 0,
	    ("vm_phys_alloc_contig: alignment must be a power of 2"));
	KASSERT((boundary & (boundary - 1)) == 0,
	    ("vm_phys_alloc_contig: boundary must be a power of 2"));
	/* Compute the queue that is the best fit for npages. */
	for (order = 0; (1 << order) < npages; order++);
	dom = 0;
restartdom:
	domain = vm_rr_selectdomain();
	for (flind = 0; flind < vm_nfreelists; flind++) {
		for (oind = min(order, VM_NFREEORDER - 1); oind < VM_NFREEORDER; oind++) {
			for (pind = 0; pind < VM_NFREEPOOL; pind++) {
				fl = &vm_phys_free_queues[domain][flind][pind][0];
				TAILQ_FOREACH(m_ret, &fl[oind].pl, plinks.q) {
					/*
					 * A free list may contain physical pages
					 * from one or more segments.
					 */
					seg = &vm_phys_segs[m_ret->segind];
					if (seg->start > high ||
					    low >= seg->end)
						continue;

					/*
					 * Is the size of this allocation request
					 * larger than the largest block size?
					 */
					if (order >= VM_NFREEORDER) {
						/*
						 * Determine if a sufficient number
						 * of subsequent blocks to satisfy
						 * the allocation request are free.
						 */
						pa = VM_PAGE_TO_PHYS(m_ret);
						pa_last = pa + size;
						for (;;) {
							pa += 1 << (PAGE_SHIFT + VM_NFREEORDER - 1);
							if (pa >= pa_last)
								break;
							if (pa < seg->start ||
							    pa >= seg->end)
								break;
							m = &seg->first_page[atop(pa - seg->start)];
							if (m->order != VM_NFREEORDER - 1)
								break;
						}
						/* If not, continue to the next block. */
						if (pa < pa_last)
							continue;
					}

					/*
					 * Determine if the blocks are within the given range,
					 * satisfy the given alignment, and do not cross the
					 * given boundary.
					 */
					pa = VM_PAGE_TO_PHYS(m_ret);
					if (pa >= low &&
					    pa + size <= high &&
					    (pa & (alignment - 1)) == 0 &&
					    ((pa ^ (pa + size - 1)) & ~(boundary - 1)) == 0)
						goto done;
				}
			}
		}
	}
	if (++dom < vm_ndomains)
		goto restartdom;
	return (NULL);
done:
	for (m = m_ret; m < &m_ret[npages]; m = &m[1 << oind]) {
		fl = (*seg->free_queues)[m->pool];
		vm_freelist_rem(fl, m, m->order);
	}
	if (m_ret->pool != VM_FREEPOOL_DEFAULT)
		vm_phys_set_pool(VM_FREEPOOL_DEFAULT, m_ret, oind);
	fl = (*seg->free_queues)[m_ret->pool];
	vm_phys_split_pages(m_ret, oind, fl, order);
	/* Return excess pages to the free lists. */
	npages_end = roundup2(npages, 1 << imin(oind, order));
	if (npages < npages_end)
		vm_phys_free_contig(&m_ret[npages], npages_end - npages);
	return (m_ret);
}
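
/*
 * The boundary test above works because pa ^ (pa + size - 1) has a bit set
 * at or above log2(boundary) exactly when the first and last byte of the
 * run lie in different boundary-sized windows.  For example, with
 * boundary = 64KB, a 32KB run at 0xc000 ends at 0x13fff;
 * 0xc000 ^ 0x13fff = 0x1ffff, which survives masking with ~0xffff, so the
 * run is rejected for crossing 0x10000.  A boundary of zero masks
 * everything off and rejects nothing.
 */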

#ifdef DDB
/*
 * Show the number of physical pages in each of the free lists.
 */
DB_SHOW_COMMAND(freepages, db_show_freepages)
{
	struct vm_freelist *fl;
	int flind, oind, pind, dom;

	for (dom = 0; dom < vm_ndomains; dom++) {
		db_printf("DOMAIN: %d\n", dom);
		for (flind = 0; flind < vm_nfreelists; flind++) {
			db_printf("FREE LIST %d:\n"
			    "\n  ORDER (SIZE)  |  NUMBER"
			    "\n              ", flind);
			for (pind = 0; pind < VM_NFREEPOOL; pind++)
				db_printf("  |  POOL %d", pind);
			db_printf("\n--            ");
			for (pind = 0; pind < VM_NFREEPOOL; pind++)
				db_printf("-- --      ");
			db_printf("--\n");
			for (oind = VM_NFREEORDER - 1; oind >= 0; oind--) {
				db_printf("  %2.2d (%6.6dK)", oind,
				    1 << (PAGE_SHIFT - 10 + oind));
				for (pind = 0; pind < VM_NFREEPOOL; pind++) {
					fl = vm_phys_free_queues[dom][flind][pind];
					db_printf("  |  %6.6d", fl[oind].lcnt);
				}
				db_printf("\n");
			}
			db_printf("\n");
		}
		db_printf("\n");
	}
}
#endif