/* vm_phys.c — FreeBSD stable/10, SVN revision 265435 (annotated listing; revision prefixes are blame residue, not source) */
1170477Salc/*- 2170477Salc * Copyright (c) 2002-2006 Rice University 3170477Salc * Copyright (c) 2007 Alan L. Cox <alc@cs.rice.edu> 4170477Salc * All rights reserved. 5170477Salc * 6170477Salc * This software was developed for the FreeBSD Project by Alan L. Cox, 7170477Salc * Olivier Crameri, Peter Druschel, Sitaram Iyer, and Juan Navarro. 8170477Salc * 9170477Salc * Redistribution and use in source and binary forms, with or without 10170477Salc * modification, are permitted provided that the following conditions 11170477Salc * are met: 12170477Salc * 1. Redistributions of source code must retain the above copyright 13170477Salc * notice, this list of conditions and the following disclaimer. 14170477Salc * 2. Redistributions in binary form must reproduce the above copyright 15170477Salc * notice, this list of conditions and the following disclaimer in the 16170477Salc * documentation and/or other materials provided with the distribution. 17170477Salc * 18170477Salc * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 19170477Salc * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT 20170477Salc * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR 21170477Salc * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT 22170477Salc * HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, 23170477Salc * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, 24170477Salc * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS 25170477Salc * OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED 26170477Salc * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT 27170477Salc * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY 28170477Salc * WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE 29170477Salc * POSSIBILITY OF SUCH DAMAGE. 
30170477Salc */ 31170477Salc 32227568Salc/* 33227568Salc * Physical memory system implementation 34227568Salc * 35227568Salc * Any external functions defined by this module are only to be used by the 36227568Salc * virtual memory system. 37227568Salc */ 38227568Salc 39170477Salc#include <sys/cdefs.h> 40170477Salc__FBSDID("$FreeBSD: stable/10/sys/vm/vm_phys.c 265435 2014-05-06 12:20:07Z kib $"); 41170477Salc 42170477Salc#include "opt_ddb.h" 43246805Sjhb#include "opt_vm.h" 44170477Salc 45170477Salc#include <sys/param.h> 46170477Salc#include <sys/systm.h> 47170477Salc#include <sys/lock.h> 48170477Salc#include <sys/kernel.h> 49170477Salc#include <sys/malloc.h> 50170477Salc#include <sys/mutex.h> 51250601Sattilio#if MAXMEMDOM > 1 52250601Sattilio#include <sys/proc.h> 53250601Sattilio#endif 54170477Salc#include <sys/queue.h> 55170477Salc#include <sys/sbuf.h> 56170477Salc#include <sys/sysctl.h> 57170477Salc#include <sys/vmmeter.h> 58170477Salc 59170477Salc#include <ddb/ddb.h> 60170477Salc 61170477Salc#include <vm/vm.h> 62170477Salc#include <vm/vm_param.h> 63170477Salc#include <vm/vm_kern.h> 64170477Salc#include <vm/vm_object.h> 65170477Salc#include <vm/vm_page.h> 66170477Salc#include <vm/vm_phys.h> 67170477Salc 68254065Skib_Static_assert(sizeof(long) * NBBY >= VM_PHYSSEG_MAX, 69254065Skib "Too many physsegs."); 70170477Salc 71210550Sjhbstruct mem_affinity *mem_affinity; 72210550Sjhb 73250601Sattilioint vm_ndomains = 1; 74250601Sattilio 75254065Skibstruct vm_phys_seg vm_phys_segs[VM_PHYSSEG_MAX]; 76254065Skibint vm_phys_nsegs; 77170477Salc 78235372Skib#define VM_PHYS_FICTITIOUS_NSEGS 8 79235372Skibstatic struct vm_phys_fictitious_seg { 80235372Skib vm_paddr_t start; 81235372Skib vm_paddr_t end; 82235372Skib vm_page_t first_page; 83235372Skib} vm_phys_fictitious_segs[VM_PHYS_FICTITIOUS_NSEGS]; 84235372Skibstatic struct mtx vm_phys_fictitious_reg_mtx; 85254017SmarkjMALLOC_DEFINE(M_FICT_PAGES, "vm_fictitious", "Fictitious VM pages"); 86235372Skib 87170477Salcstatic struct 
vm_freelist 88250601Sattilio vm_phys_free_queues[MAXMEMDOM][VM_NFREELIST][VM_NFREEPOOL][VM_NFREEORDER]; 89170477Salc 90170477Salcstatic int vm_nfreelists = VM_FREELIST_DEFAULT + 1; 91170477Salc 92170477Salcstatic int cnt_prezero; 93170477SalcSYSCTL_INT(_vm_stats_misc, OID_AUTO, cnt_prezero, CTLFLAG_RD, 94170477Salc &cnt_prezero, 0, "The number of physical pages prezeroed at idle time"); 95170477Salc 96170477Salcstatic int sysctl_vm_phys_free(SYSCTL_HANDLER_ARGS); 97170477SalcSYSCTL_OID(_vm, OID_AUTO, phys_free, CTLTYPE_STRING | CTLFLAG_RD, 98170477Salc NULL, 0, sysctl_vm_phys_free, "A", "Phys Free Info"); 99170477Salc 100170477Salcstatic int sysctl_vm_phys_segs(SYSCTL_HANDLER_ARGS); 101170477SalcSYSCTL_OID(_vm, OID_AUTO, phys_segs, CTLTYPE_STRING | CTLFLAG_RD, 102170477Salc NULL, 0, sysctl_vm_phys_segs, "A", "Phys Seg Info"); 103170477Salc 104250601SattilioSYSCTL_INT(_vm, OID_AUTO, ndomains, CTLFLAG_RD, 105250601Sattilio &vm_ndomains, 0, "Number of physical memory domains available."); 106210550Sjhb 107250219Sjhbstatic vm_page_t vm_phys_alloc_domain_pages(int domain, int flind, int pool, 108250219Sjhb int order); 109210550Sjhbstatic void _vm_phys_create_seg(vm_paddr_t start, vm_paddr_t end, int flind, 110210550Sjhb int domain); 111170477Salcstatic void vm_phys_create_seg(vm_paddr_t start, vm_paddr_t end, int flind); 112170477Salcstatic int vm_phys_paddr_to_segind(vm_paddr_t pa); 113170477Salcstatic void vm_phys_split_pages(vm_page_t m, int oind, struct vm_freelist *fl, 114170477Salc int order); 115170477Salc 116250601Sattiliostatic __inline int 117250601Sattiliovm_rr_selectdomain(void) 118250601Sattilio{ 119250601Sattilio#if MAXMEMDOM > 1 120250601Sattilio struct thread *td; 121250601Sattilio 122250601Sattilio td = curthread; 123250601Sattilio 124250601Sattilio td->td_dom_rr_idx++; 125250601Sattilio td->td_dom_rr_idx %= vm_ndomains; 126250601Sattilio return (td->td_dom_rr_idx); 127250601Sattilio#else 128250601Sattilio return (0); 129250601Sattilio#endif 
130250601Sattilio} 131250601Sattilio 132254065Skibboolean_t 133254065Skibvm_phys_domain_intersects(long mask, vm_paddr_t low, vm_paddr_t high) 134254065Skib{ 135254065Skib struct vm_phys_seg *s; 136254065Skib int idx; 137254065Skib 138254065Skib while ((idx = ffsl(mask)) != 0) { 139254065Skib idx--; /* ffsl counts from 1 */ 140254065Skib mask &= ~(1UL << idx); 141254065Skib s = &vm_phys_segs[idx]; 142254065Skib if (low < s->end && high > s->start) 143254065Skib return (TRUE); 144254065Skib } 145254065Skib return (FALSE); 146254065Skib} 147254065Skib 148170477Salc/* 149170477Salc * Outputs the state of the physical memory allocator, specifically, 150170477Salc * the amount of physical memory in each free list. 151170477Salc */ 152170477Salcstatic int 153170477Salcsysctl_vm_phys_free(SYSCTL_HANDLER_ARGS) 154170477Salc{ 155170477Salc struct sbuf sbuf; 156170477Salc struct vm_freelist *fl; 157250601Sattilio int dom, error, flind, oind, pind; 158170477Salc 159217916Smdf error = sysctl_wire_old_buffer(req, 0); 160217916Smdf if (error != 0) 161217916Smdf return (error); 162250601Sattilio sbuf_new_for_sysctl(&sbuf, NULL, 128 * vm_ndomains, req); 163250601Sattilio for (dom = 0; dom < vm_ndomains; dom++) { 164256275Salc sbuf_printf(&sbuf,"\nDOMAIN %d:\n", dom); 165250601Sattilio for (flind = 0; flind < vm_nfreelists; flind++) { 166256275Salc sbuf_printf(&sbuf, "\nFREE LIST %d:\n" 167250601Sattilio "\n ORDER (SIZE) | NUMBER" 168250601Sattilio "\n ", flind); 169250601Sattilio for (pind = 0; pind < VM_NFREEPOOL; pind++) 170250601Sattilio sbuf_printf(&sbuf, " | POOL %d", pind); 171250601Sattilio sbuf_printf(&sbuf, "\n-- "); 172250601Sattilio for (pind = 0; pind < VM_NFREEPOOL; pind++) 173250601Sattilio sbuf_printf(&sbuf, "-- -- "); 174250601Sattilio sbuf_printf(&sbuf, "--\n"); 175250601Sattilio for (oind = VM_NFREEORDER - 1; oind >= 0; oind--) { 176250601Sattilio sbuf_printf(&sbuf, " %2d (%6dK)", oind, 177250601Sattilio 1 << (PAGE_SHIFT - 10 + oind)); 178250601Sattilio for (pind 
= 0; pind < VM_NFREEPOOL; pind++) { 179250601Sattilio fl = vm_phys_free_queues[dom][flind][pind]; 180256275Salc sbuf_printf(&sbuf, " | %6d", 181250601Sattilio fl[oind].lcnt); 182250601Sattilio } 183250601Sattilio sbuf_printf(&sbuf, "\n"); 184170477Salc } 185170477Salc } 186170477Salc } 187212750Smdf error = sbuf_finish(&sbuf); 188170477Salc sbuf_delete(&sbuf); 189170477Salc return (error); 190170477Salc} 191170477Salc 192170477Salc/* 193170477Salc * Outputs the set of physical memory segments. 194170477Salc */ 195170477Salcstatic int 196170477Salcsysctl_vm_phys_segs(SYSCTL_HANDLER_ARGS) 197170477Salc{ 198170477Salc struct sbuf sbuf; 199170477Salc struct vm_phys_seg *seg; 200170477Salc int error, segind; 201170477Salc 202217916Smdf error = sysctl_wire_old_buffer(req, 0); 203217916Smdf if (error != 0) 204217916Smdf return (error); 205212750Smdf sbuf_new_for_sysctl(&sbuf, NULL, 128, req); 206170477Salc for (segind = 0; segind < vm_phys_nsegs; segind++) { 207170477Salc sbuf_printf(&sbuf, "\nSEGMENT %d:\n\n", segind); 208170477Salc seg = &vm_phys_segs[segind]; 209170477Salc sbuf_printf(&sbuf, "start: %#jx\n", 210170477Salc (uintmax_t)seg->start); 211170477Salc sbuf_printf(&sbuf, "end: %#jx\n", 212170477Salc (uintmax_t)seg->end); 213210550Sjhb sbuf_printf(&sbuf, "domain: %d\n", seg->domain); 214170477Salc sbuf_printf(&sbuf, "free list: %p\n", seg->free_queues); 215170477Salc } 216212750Smdf error = sbuf_finish(&sbuf); 217170477Salc sbuf_delete(&sbuf); 218170477Salc return (error); 219170477Salc} 220170477Salc 221250601Sattiliostatic void 222250601Sattiliovm_freelist_add(struct vm_freelist *fl, vm_page_t m, int order, int tail) 223210550Sjhb{ 224210550Sjhb 225250601Sattilio m->order = order; 226250601Sattilio if (tail) 227254182Skib TAILQ_INSERT_TAIL(&fl[order].pl, m, plinks.q); 228250601Sattilio else 229254182Skib TAILQ_INSERT_HEAD(&fl[order].pl, m, plinks.q); 230250601Sattilio fl[order].lcnt++; 231210550Sjhb} 232250601Sattilio 233250601Sattiliostatic void 
234250601Sattiliovm_freelist_rem(struct vm_freelist *fl, vm_page_t m, int order) 235250601Sattilio{ 236250601Sattilio 237254182Skib TAILQ_REMOVE(&fl[order].pl, m, plinks.q); 238250601Sattilio fl[order].lcnt--; 239250601Sattilio m->order = VM_NFREEORDER; 240250601Sattilio} 241250601Sattilio 242210550Sjhb/* 243170477Salc * Create a physical memory segment. 244170477Salc */ 245170477Salcstatic void 246210550Sjhb_vm_phys_create_seg(vm_paddr_t start, vm_paddr_t end, int flind, int domain) 247170477Salc{ 248170477Salc struct vm_phys_seg *seg; 249170477Salc#ifdef VM_PHYSSEG_SPARSE 250233194Sjhb long pages; 251170477Salc int segind; 252170477Salc 253170477Salc pages = 0; 254170477Salc for (segind = 0; segind < vm_phys_nsegs; segind++) { 255170477Salc seg = &vm_phys_segs[segind]; 256170477Salc pages += atop(seg->end - seg->start); 257170477Salc } 258170477Salc#endif 259170477Salc KASSERT(vm_phys_nsegs < VM_PHYSSEG_MAX, 260170477Salc ("vm_phys_create_seg: increase VM_PHYSSEG_MAX")); 261250601Sattilio KASSERT(domain < vm_ndomains, 262250601Sattilio ("vm_phys_create_seg: invalid domain provided")); 263170477Salc seg = &vm_phys_segs[vm_phys_nsegs++]; 264170477Salc seg->start = start; 265170477Salc seg->end = end; 266210550Sjhb seg->domain = domain; 267170477Salc#ifdef VM_PHYSSEG_SPARSE 268170477Salc seg->first_page = &vm_page_array[pages]; 269170477Salc#else 270170477Salc seg->first_page = PHYS_TO_VM_PAGE(start); 271170477Salc#endif 272250601Sattilio seg->free_queues = &vm_phys_free_queues[domain][flind]; 273170477Salc} 274170477Salc 275210550Sjhbstatic void 276210550Sjhbvm_phys_create_seg(vm_paddr_t start, vm_paddr_t end, int flind) 277210550Sjhb{ 278210550Sjhb int i; 279210550Sjhb 280210550Sjhb if (mem_affinity == NULL) { 281210550Sjhb _vm_phys_create_seg(start, end, flind, 0); 282210550Sjhb return; 283210550Sjhb } 284210550Sjhb 285210550Sjhb for (i = 0;; i++) { 286210550Sjhb if (mem_affinity[i].end == 0) 287210550Sjhb panic("Reached end of affinity info"); 288210550Sjhb if 
(mem_affinity[i].end <= start) 289210550Sjhb continue; 290210550Sjhb if (mem_affinity[i].start > start) 291210550Sjhb panic("No affinity info for start %jx", 292210550Sjhb (uintmax_t)start); 293210550Sjhb if (mem_affinity[i].end >= end) { 294210550Sjhb _vm_phys_create_seg(start, end, flind, 295210550Sjhb mem_affinity[i].domain); 296210550Sjhb break; 297210550Sjhb } 298210550Sjhb _vm_phys_create_seg(start, mem_affinity[i].end, flind, 299210550Sjhb mem_affinity[i].domain); 300210550Sjhb start = mem_affinity[i].end; 301210550Sjhb } 302210550Sjhb} 303210550Sjhb 304170477Salc/* 305170477Salc * Initialize the physical memory allocator. 306170477Salc */ 307170477Salcvoid 308170477Salcvm_phys_init(void) 309170477Salc{ 310170477Salc struct vm_freelist *fl; 311250601Sattilio int dom, flind, i, oind, pind; 312170477Salc 313170477Salc for (i = 0; phys_avail[i + 1] != 0; i += 2) { 314170477Salc#ifdef VM_FREELIST_ISADMA 315170477Salc if (phys_avail[i] < 16777216) { 316170477Salc if (phys_avail[i + 1] > 16777216) { 317170477Salc vm_phys_create_seg(phys_avail[i], 16777216, 318170477Salc VM_FREELIST_ISADMA); 319170477Salc vm_phys_create_seg(16777216, phys_avail[i + 1], 320170477Salc VM_FREELIST_DEFAULT); 321170477Salc } else { 322170477Salc vm_phys_create_seg(phys_avail[i], 323170477Salc phys_avail[i + 1], VM_FREELIST_ISADMA); 324170477Salc } 325170477Salc if (VM_FREELIST_ISADMA >= vm_nfreelists) 326170477Salc vm_nfreelists = VM_FREELIST_ISADMA + 1; 327170477Salc } else 328170477Salc#endif 329170477Salc#ifdef VM_FREELIST_HIGHMEM 330170477Salc if (phys_avail[i + 1] > VM_HIGHMEM_ADDRESS) { 331170477Salc if (phys_avail[i] < VM_HIGHMEM_ADDRESS) { 332170477Salc vm_phys_create_seg(phys_avail[i], 333170477Salc VM_HIGHMEM_ADDRESS, VM_FREELIST_DEFAULT); 334170477Salc vm_phys_create_seg(VM_HIGHMEM_ADDRESS, 335170477Salc phys_avail[i + 1], VM_FREELIST_HIGHMEM); 336170477Salc } else { 337170477Salc vm_phys_create_seg(phys_avail[i], 338170477Salc phys_avail[i + 1], VM_FREELIST_HIGHMEM); 
339170477Salc } 340170477Salc if (VM_FREELIST_HIGHMEM >= vm_nfreelists) 341170477Salc vm_nfreelists = VM_FREELIST_HIGHMEM + 1; 342170477Salc } else 343170477Salc#endif 344170477Salc vm_phys_create_seg(phys_avail[i], phys_avail[i + 1], 345170477Salc VM_FREELIST_DEFAULT); 346170477Salc } 347250601Sattilio for (dom = 0; dom < vm_ndomains; dom++) { 348250601Sattilio for (flind = 0; flind < vm_nfreelists; flind++) { 349250601Sattilio for (pind = 0; pind < VM_NFREEPOOL; pind++) { 350250601Sattilio fl = vm_phys_free_queues[dom][flind][pind]; 351250601Sattilio for (oind = 0; oind < VM_NFREEORDER; oind++) 352250601Sattilio TAILQ_INIT(&fl[oind].pl); 353250601Sattilio } 354170477Salc } 355170477Salc } 356235372Skib mtx_init(&vm_phys_fictitious_reg_mtx, "vmfctr", NULL, MTX_DEF); 357170477Salc} 358170477Salc 359170477Salc/* 360170477Salc * Split a contiguous, power of two-sized set of physical pages. 361170477Salc */ 362170477Salcstatic __inline void 363170477Salcvm_phys_split_pages(vm_page_t m, int oind, struct vm_freelist *fl, int order) 364170477Salc{ 365170477Salc vm_page_t m_buddy; 366170477Salc 367170477Salc while (oind > order) { 368170477Salc oind--; 369170477Salc m_buddy = &m[1 << oind]; 370170477Salc KASSERT(m_buddy->order == VM_NFREEORDER, 371170477Salc ("vm_phys_split_pages: page %p has unexpected order %d", 372170477Salc m_buddy, m_buddy->order)); 373250601Sattilio vm_freelist_add(fl, m_buddy, oind, 0); 374170477Salc } 375170477Salc} 376170477Salc 377170477Salc/* 378170477Salc * Initialize a physical page and add it to the free lists. 
379170477Salc */ 380170477Salcvoid 381170477Salcvm_phys_add_page(vm_paddr_t pa) 382170477Salc{ 383170477Salc vm_page_t m; 384254065Skib struct vm_domain *vmd; 385170477Salc 386170477Salc cnt.v_page_count++; 387170477Salc m = vm_phys_paddr_to_vm_page(pa); 388170477Salc m->phys_addr = pa; 389217508Salc m->queue = PQ_NONE; 390170477Salc m->segind = vm_phys_paddr_to_segind(pa); 391254065Skib vmd = vm_phys_domain(m); 392254065Skib vmd->vmd_page_count++; 393254065Skib vmd->vmd_segs |= 1UL << m->segind; 394170477Salc m->flags = PG_FREE; 395170477Salc KASSERT(m->order == VM_NFREEORDER, 396170477Salc ("vm_phys_add_page: page %p has unexpected order %d", 397170477Salc m, m->order)); 398170477Salc m->pool = VM_FREEPOOL_DEFAULT; 399170477Salc pmap_page_init(m); 400171451Salc mtx_lock(&vm_page_queue_free_mtx); 401254065Skib vm_phys_freecnt_adj(m, 1); 402170477Salc vm_phys_free_pages(m, 0); 403171451Salc mtx_unlock(&vm_page_queue_free_mtx); 404170477Salc} 405170477Salc 406170477Salc/* 407170477Salc * Allocate a contiguous, power of two-sized set of physical pages 408170477Salc * from the free lists. 409171451Salc * 410171451Salc * The free page queues must be locked. 
411170477Salc */ 412170477Salcvm_page_t 413170477Salcvm_phys_alloc_pages(int pool, int order) 414170477Salc{ 415210327Sjchandra vm_page_t m; 416250601Sattilio int dom, domain, flind; 417210327Sjchandra 418250219Sjhb KASSERT(pool < VM_NFREEPOOL, 419250219Sjhb ("vm_phys_alloc_pages: pool %d is out of range", pool)); 420250219Sjhb KASSERT(order < VM_NFREEORDER, 421250219Sjhb ("vm_phys_alloc_pages: order %d is out of range", order)); 422250219Sjhb 423250601Sattilio for (dom = 0; dom < vm_ndomains; dom++) { 424250601Sattilio domain = vm_rr_selectdomain(); 425250601Sattilio for (flind = 0; flind < vm_nfreelists; flind++) { 426250601Sattilio m = vm_phys_alloc_domain_pages(domain, flind, pool, 427250601Sattilio order); 428250601Sattilio if (m != NULL) 429250601Sattilio return (m); 430250601Sattilio } 431210327Sjchandra } 432210327Sjchandra return (NULL); 433210327Sjchandra} 434210327Sjchandra 435210327Sjchandra/* 436210327Sjchandra * Find and dequeue a free page on the given free list, with the 437210327Sjchandra * specified pool and order 438210327Sjchandra */ 439210327Sjchandravm_page_t 440210327Sjchandravm_phys_alloc_freelist_pages(int flind, int pool, int order) 441250219Sjhb{ 442170477Salc vm_page_t m; 443250601Sattilio int dom, domain; 444170477Salc 445210327Sjchandra KASSERT(flind < VM_NFREELIST, 446210327Sjchandra ("vm_phys_alloc_freelist_pages: freelist %d is out of range", flind)); 447170477Salc KASSERT(pool < VM_NFREEPOOL, 448210327Sjchandra ("vm_phys_alloc_freelist_pages: pool %d is out of range", pool)); 449170477Salc KASSERT(order < VM_NFREEORDER, 450210327Sjchandra ("vm_phys_alloc_freelist_pages: order %d is out of range", order)); 451210550Sjhb 452250601Sattilio for (dom = 0; dom < vm_ndomains; dom++) { 453250601Sattilio domain = vm_rr_selectdomain(); 454250601Sattilio m = vm_phys_alloc_domain_pages(domain, flind, pool, order); 455250601Sattilio if (m != NULL) 456250601Sattilio return (m); 457250601Sattilio } 458250601Sattilio return (NULL); 459250219Sjhb} 
460250219Sjhb 461250219Sjhbstatic vm_page_t 462250219Sjhbvm_phys_alloc_domain_pages(int domain, int flind, int pool, int order) 463250219Sjhb{ 464250219Sjhb struct vm_freelist *fl; 465250219Sjhb struct vm_freelist *alt; 466250219Sjhb int oind, pind; 467250219Sjhb vm_page_t m; 468250219Sjhb 469170477Salc mtx_assert(&vm_page_queue_free_mtx, MA_OWNED); 470250601Sattilio fl = &vm_phys_free_queues[domain][flind][pool][0]; 471210327Sjchandra for (oind = order; oind < VM_NFREEORDER; oind++) { 472210327Sjchandra m = TAILQ_FIRST(&fl[oind].pl); 473210327Sjchandra if (m != NULL) { 474250601Sattilio vm_freelist_rem(fl, m, oind); 475210327Sjchandra vm_phys_split_pages(m, oind, fl, order); 476210327Sjchandra return (m); 477210327Sjchandra } 478210327Sjchandra } 479210327Sjchandra 480210327Sjchandra /* 481210327Sjchandra * The given pool was empty. Find the largest 482210327Sjchandra * contiguous, power-of-two-sized set of pages in any 483210327Sjchandra * pool. Transfer these pages to the given pool, and 484210327Sjchandra * use them to satisfy the allocation. 485210327Sjchandra */ 486210327Sjchandra for (oind = VM_NFREEORDER - 1; oind >= order; oind--) { 487210327Sjchandra for (pind = 0; pind < VM_NFREEPOOL; pind++) { 488250601Sattilio alt = &vm_phys_free_queues[domain][flind][pind][0]; 489210327Sjchandra m = TAILQ_FIRST(&alt[oind].pl); 490170477Salc if (m != NULL) { 491250601Sattilio vm_freelist_rem(alt, m, oind); 492210327Sjchandra vm_phys_set_pool(pool, m, oind); 493170477Salc vm_phys_split_pages(m, oind, fl, order); 494170477Salc return (m); 495170477Salc } 496170477Salc } 497170477Salc } 498170477Salc return (NULL); 499170477Salc} 500170477Salc 501170477Salc/* 502170477Salc * Find the vm_page corresponding to the given physical address. 
503170477Salc */ 504170477Salcvm_page_t 505170477Salcvm_phys_paddr_to_vm_page(vm_paddr_t pa) 506170477Salc{ 507170477Salc struct vm_phys_seg *seg; 508170477Salc int segind; 509170477Salc 510170477Salc for (segind = 0; segind < vm_phys_nsegs; segind++) { 511170477Salc seg = &vm_phys_segs[segind]; 512170477Salc if (pa >= seg->start && pa < seg->end) 513170477Salc return (&seg->first_page[atop(pa - seg->start)]); 514170477Salc } 515194459Sthompsa return (NULL); 516170477Salc} 517170477Salc 518235372Skibvm_page_t 519235372Skibvm_phys_fictitious_to_vm_page(vm_paddr_t pa) 520235372Skib{ 521235372Skib struct vm_phys_fictitious_seg *seg; 522235372Skib vm_page_t m; 523235372Skib int segind; 524235372Skib 525235372Skib m = NULL; 526235372Skib for (segind = 0; segind < VM_PHYS_FICTITIOUS_NSEGS; segind++) { 527235372Skib seg = &vm_phys_fictitious_segs[segind]; 528235372Skib if (pa >= seg->start && pa < seg->end) { 529235372Skib m = &seg->first_page[atop(pa - seg->start)]; 530235372Skib KASSERT((m->flags & PG_FICTITIOUS) != 0, 531235372Skib ("%p not fictitious", m)); 532235372Skib break; 533235372Skib } 534235372Skib } 535235372Skib return (m); 536235372Skib} 537235372Skib 538235372Skibint 539235372Skibvm_phys_fictitious_reg_range(vm_paddr_t start, vm_paddr_t end, 540235372Skib vm_memattr_t memattr) 541235372Skib{ 542235372Skib struct vm_phys_fictitious_seg *seg; 543235372Skib vm_page_t fp; 544235372Skib long i, page_count; 545235372Skib int segind; 546235372Skib#ifdef VM_PHYSSEG_DENSE 547235372Skib long pi; 548235372Skib boolean_t malloced; 549235372Skib#endif 550235372Skib 551235372Skib page_count = (end - start) / PAGE_SIZE; 552235372Skib 553235372Skib#ifdef VM_PHYSSEG_DENSE 554235372Skib pi = atop(start); 555265435Skib if (pi >= first_page && pi < vm_page_array_size + first_page) { 556265435Skib if (atop(end) >= vm_page_array_size + first_page) 557265435Skib return (EINVAL); 558235372Skib fp = &vm_page_array[pi - first_page]; 559235372Skib malloced = FALSE; 560235372Skib } 
else 561235372Skib#endif 562235372Skib { 563235372Skib fp = malloc(page_count * sizeof(struct vm_page), M_FICT_PAGES, 564235372Skib M_WAITOK | M_ZERO); 565235372Skib#ifdef VM_PHYSSEG_DENSE 566235372Skib malloced = TRUE; 567235372Skib#endif 568235372Skib } 569235372Skib for (i = 0; i < page_count; i++) { 570235372Skib vm_page_initfake(&fp[i], start + PAGE_SIZE * i, memattr); 571254138Sattilio fp[i].oflags &= ~VPO_UNMANAGED; 572254138Sattilio fp[i].busy_lock = VPB_UNBUSIED; 573235372Skib } 574235372Skib mtx_lock(&vm_phys_fictitious_reg_mtx); 575235372Skib for (segind = 0; segind < VM_PHYS_FICTITIOUS_NSEGS; segind++) { 576235372Skib seg = &vm_phys_fictitious_segs[segind]; 577235372Skib if (seg->start == 0 && seg->end == 0) { 578235372Skib seg->start = start; 579235372Skib seg->end = end; 580235372Skib seg->first_page = fp; 581235372Skib mtx_unlock(&vm_phys_fictitious_reg_mtx); 582235372Skib return (0); 583235372Skib } 584235372Skib } 585235372Skib mtx_unlock(&vm_phys_fictitious_reg_mtx); 586235372Skib#ifdef VM_PHYSSEG_DENSE 587235372Skib if (malloced) 588235372Skib#endif 589235372Skib free(fp, M_FICT_PAGES); 590235372Skib return (EBUSY); 591235372Skib} 592235372Skib 593235372Skibvoid 594235372Skibvm_phys_fictitious_unreg_range(vm_paddr_t start, vm_paddr_t end) 595235372Skib{ 596235372Skib struct vm_phys_fictitious_seg *seg; 597235372Skib vm_page_t fp; 598235372Skib int segind; 599235372Skib#ifdef VM_PHYSSEG_DENSE 600235372Skib long pi; 601235372Skib#endif 602235372Skib 603235372Skib#ifdef VM_PHYSSEG_DENSE 604235372Skib pi = atop(start); 605235372Skib#endif 606235372Skib 607235372Skib mtx_lock(&vm_phys_fictitious_reg_mtx); 608235372Skib for (segind = 0; segind < VM_PHYS_FICTITIOUS_NSEGS; segind++) { 609235372Skib seg = &vm_phys_fictitious_segs[segind]; 610235372Skib if (seg->start == start && seg->end == end) { 611235372Skib seg->start = seg->end = 0; 612235372Skib fp = seg->first_page; 613235372Skib seg->first_page = NULL; 614235372Skib 
mtx_unlock(&vm_phys_fictitious_reg_mtx); 615235372Skib#ifdef VM_PHYSSEG_DENSE 616235372Skib if (pi < first_page || atop(end) >= vm_page_array_size) 617235372Skib#endif 618235372Skib free(fp, M_FICT_PAGES); 619235372Skib return; 620235372Skib } 621235372Skib } 622235372Skib mtx_unlock(&vm_phys_fictitious_reg_mtx); 623235372Skib KASSERT(0, ("Unregistering not registered fictitious range")); 624235372Skib} 625235372Skib 626170477Salc/* 627170477Salc * Find the segment containing the given physical address. 628170477Salc */ 629170477Salcstatic int 630170477Salcvm_phys_paddr_to_segind(vm_paddr_t pa) 631170477Salc{ 632170477Salc struct vm_phys_seg *seg; 633170477Salc int segind; 634170477Salc 635170477Salc for (segind = 0; segind < vm_phys_nsegs; segind++) { 636170477Salc seg = &vm_phys_segs[segind]; 637170477Salc if (pa >= seg->start && pa < seg->end) 638170477Salc return (segind); 639170477Salc } 640170477Salc panic("vm_phys_paddr_to_segind: paddr %#jx is not in any segment" , 641170477Salc (uintmax_t)pa); 642170477Salc} 643170477Salc 644170477Salc/* 645170477Salc * Free a contiguous, power of two-sized set of physical pages. 646171451Salc * 647171451Salc * The free page queues must be locked. 
648170477Salc */ 649170477Salcvoid 650170477Salcvm_phys_free_pages(vm_page_t m, int order) 651170477Salc{ 652170477Salc struct vm_freelist *fl; 653170477Salc struct vm_phys_seg *seg; 654226928Salc vm_paddr_t pa; 655170477Salc vm_page_t m_buddy; 656170477Salc 657170477Salc KASSERT(m->order == VM_NFREEORDER, 658171451Salc ("vm_phys_free_pages: page %p has unexpected order %d", 659170477Salc m, m->order)); 660170477Salc KASSERT(m->pool < VM_NFREEPOOL, 661171451Salc ("vm_phys_free_pages: page %p has unexpected pool %d", 662170477Salc m, m->pool)); 663170477Salc KASSERT(order < VM_NFREEORDER, 664171451Salc ("vm_phys_free_pages: order %d is out of range", order)); 665170477Salc mtx_assert(&vm_page_queue_free_mtx, MA_OWNED); 666170477Salc seg = &vm_phys_segs[m->segind]; 667226928Salc if (order < VM_NFREEORDER - 1) { 668226928Salc pa = VM_PAGE_TO_PHYS(m); 669226928Salc do { 670226928Salc pa ^= ((vm_paddr_t)1 << (PAGE_SHIFT + order)); 671226928Salc if (pa < seg->start || pa >= seg->end) 672226928Salc break; 673226928Salc m_buddy = &seg->first_page[atop(pa - seg->start)]; 674226928Salc if (m_buddy->order != order) 675226928Salc break; 676226928Salc fl = (*seg->free_queues)[m_buddy->pool]; 677250601Sattilio vm_freelist_rem(fl, m_buddy, order); 678226928Salc if (m_buddy->pool != m->pool) 679226928Salc vm_phys_set_pool(m->pool, m_buddy, order); 680226928Salc order++; 681226928Salc pa &= ~(((vm_paddr_t)1 << (PAGE_SHIFT + order)) - 1); 682226928Salc m = &seg->first_page[atop(pa - seg->start)]; 683226928Salc } while (order < VM_NFREEORDER - 1); 684170477Salc } 685170477Salc fl = (*seg->free_queues)[m->pool]; 686250601Sattilio vm_freelist_add(fl, m, order, 1); 687170477Salc} 688170477Salc 689170477Salc/* 690226928Salc * Free a contiguous, arbitrarily sized set of physical pages. 691226928Salc * 692226928Salc * The free page queues must be locked. 
693226928Salc */ 694226928Salcvoid 695226928Salcvm_phys_free_contig(vm_page_t m, u_long npages) 696226928Salc{ 697226928Salc u_int n; 698226928Salc int order; 699226928Salc 700226928Salc /* 701226928Salc * Avoid unnecessary coalescing by freeing the pages in the largest 702226928Salc * possible power-of-two-sized subsets. 703226928Salc */ 704226928Salc mtx_assert(&vm_page_queue_free_mtx, MA_OWNED); 705226928Salc for (;; npages -= n) { 706226928Salc /* 707226928Salc * Unsigned "min" is used here so that "order" is assigned 708226928Salc * "VM_NFREEORDER - 1" when "m"'s physical address is zero 709226928Salc * or the low-order bits of its physical address are zero 710226928Salc * because the size of a physical address exceeds the size of 711226928Salc * a long. 712226928Salc */ 713226928Salc order = min(ffsl(VM_PAGE_TO_PHYS(m) >> PAGE_SHIFT) - 1, 714226928Salc VM_NFREEORDER - 1); 715226928Salc n = 1 << order; 716226928Salc if (npages < n) 717226928Salc break; 718226928Salc vm_phys_free_pages(m, order); 719226928Salc m += n; 720226928Salc } 721226928Salc /* The residual "npages" is less than "1 << (VM_NFREEORDER - 1)". */ 722226928Salc for (; npages > 0; npages -= n) { 723226928Salc order = flsl(npages) - 1; 724226928Salc n = 1 << order; 725226928Salc vm_phys_free_pages(m, order); 726226928Salc m += n; 727226928Salc } 728226928Salc} 729226928Salc 730226928Salc/* 731170477Salc * Set the pool for a contiguous, power of two-sized set of physical pages. 732170477Salc */ 733172317Salcvoid 734170477Salcvm_phys_set_pool(int pool, vm_page_t m, int order) 735170477Salc{ 736170477Salc vm_page_t m_tmp; 737170477Salc 738170477Salc for (m_tmp = m; m_tmp < &m[1 << order]; m_tmp++) 739170477Salc m_tmp->pool = pool; 740170477Salc} 741170477Salc 742170477Salc/* 743174825Salc * Search for the given physical page "m" in the free lists. If the search 744174825Salc * succeeds, remove "m" from the free lists and return TRUE. 
Otherwise, return
 * FALSE, indicating that "m" is not in the free lists.
 *
 * The free page queues must be locked.
 */
boolean_t
vm_phys_unfree_page(vm_page_t m)
{
	struct vm_freelist *fl;
	struct vm_phys_seg *seg;
	vm_paddr_t pa, pa_half;
	vm_page_t m_set, m_tmp;
	int order;

	mtx_assert(&vm_page_queue_free_mtx, MA_OWNED);

	/*
	 * First, find the contiguous, power of two-sized set of free
	 * physical pages containing the given physical page "m" and
	 * assign it to "m_set".
	 *
	 * Only the first page of a free buddy block carries a valid
	 * order; interior pages have order == VM_NFREEORDER.  So walk
	 * upward through successively larger naturally-aligned blocks
	 * until a block head is found (or the candidate address falls
	 * below the start of "m"'s segment, in which case "m" cannot
	 * be part of a free block at all).
	 */
	seg = &vm_phys_segs[m->segind];
	for (m_set = m, order = 0; m_set->order == VM_NFREEORDER &&
	    order < VM_NFREEORDER - 1; ) {
		order++;
		/* Address of the order-aligned block that would contain "m". */
		pa = m->phys_addr & (~(vm_paddr_t)0 << (PAGE_SHIFT + order));
		if (pa >= seg->start)
			m_set = &seg->first_page[atop(pa - seg->start)];
		else
			return (FALSE);
	}
	/*
	 * The candidate block head must be large enough to actually
	 * contain "m" (order >= the level we walked up to) ...
	 */
	if (m_set->order < order)
		return (FALSE);
	/* ... and must itself be free (an allocated head has VM_NFREEORDER). */
	if (m_set->order == VM_NFREEORDER)
		return (FALSE);
	KASSERT(m_set->order < VM_NFREEORDER,
	    ("vm_phys_unfree_page: page %p has unexpected order %d",
	    m_set, m_set->order));

	/*
	 * Next, remove "m_set" from the free lists.  Finally, extract
	 * "m" from "m_set" using an iterative algorithm: While "m_set"
	 * is larger than a page, shrink "m_set" by returning the half
	 * of "m_set" that does not contain "m" to the free lists.
	 */
	fl = (*seg->free_queues)[m_set->pool];
	order = m_set->order;
	vm_freelist_rem(fl, m_set, order);
	while (order > 0) {
		order--;
		/* Physical address of the upper half of the current block. */
		pa_half = m_set->phys_addr ^ (1 << (PAGE_SHIFT + order));
		if (m->phys_addr < pa_half)
			m_tmp = &seg->first_page[atop(pa_half - seg->start)];
		else {
			m_tmp = m_set;
			m_set = &seg->first_page[atop(pa_half - seg->start)];
		}
		/* Give back the half that does not contain "m". */
		vm_freelist_add(fl, m_tmp, order, 0);
	}
	KASSERT(m_set == m, ("vm_phys_unfree_page: fatal inconsistency"));
	return (TRUE);
}

/*
 * Try to zero one physical page.  Used by an idle priority thread.
 *
 * Returns TRUE if a page was zeroed and returned to the free lists
 * with PG_ZERO set; FALSE is never returned (the outer loop cycles
 * through all order/pool/freelist combinations until a candidate is
 * found).  Called with the free page queue mutex held; the lock is
 * dropped around the actual zeroing and reacquired afterwards.
 */
boolean_t
vm_phys_zero_pages_idle(void)
{
	/*
	 * The cursors are static so that successive calls resume the
	 * scan where the previous call left off instead of always
	 * rescanning from the beginning.
	 */
	static struct vm_freelist *fl;
	static int flind, oind, pind;
	vm_page_t m, m_tmp;
	int domain;

	domain = vm_rr_selectdomain();
	/*
	 * NOTE(review): "fl" is unconditionally reset to queue [0][0]
	 * here even though the static "flind"/"pind" cursors persist
	 * across calls and may be non-zero; the advance code at the
	 * bottom later resynchronizes fl with flind/pind.  Looks
	 * harmless (any free list is a valid scan target) but worth
	 * confirming this is intentional.
	 */
	fl = vm_phys_free_queues[domain][0][0];
	mtx_assert(&vm_page_queue_free_mtx, MA_OWNED);
	for (;;) {
		/* Scan from the tail, i.e., the coldest pages first. */
		TAILQ_FOREACH_REVERSE(m, &fl[oind].pl, pglist, plinks.q) {
			for (m_tmp = m; m_tmp < &m[1 << oind]; m_tmp++) {
				/* Skip pages already zeroed or holding cached data. */
				if ((m_tmp->flags & (PG_CACHED | PG_ZERO)) == 0) {
					/*
					 * Pull the page out of the free
					 * lists so nobody can allocate it
					 * while we zero it unlocked.
					 */
					vm_phys_unfree_page(m_tmp);
					/*
					 * NOTE(review): the page argument
					 * here is "m", not "m_tmp" — verify
					 * the macro ignores it or that this
					 * is intended, since the page being
					 * removed is "m_tmp".
					 */
					vm_phys_freecnt_adj(m, -1);
					mtx_unlock(&vm_page_queue_free_mtx);
					pmap_zero_page_idle(m_tmp);
					m_tmp->flags |= PG_ZERO;
					mtx_lock(&vm_page_queue_free_mtx);
					vm_phys_freecnt_adj(m, 1);
					vm_phys_free_pages(m_tmp, 0);
					vm_page_zero_count++;
					cnt_prezero++;
					return (TRUE);
				}
			}
		}
		/* Advance the persistent cursor: order, then pool, then list. */
		oind++;
		if (oind == VM_NFREEORDER) {
			oind = 0;
			pind++;
			if (pind == VM_NFREEPOOL) {
				pind = 0;
				flind++;
				if (flind == vm_nfreelists)
					flind = 0;
			}
			fl = vm_phys_free_queues[domain][flind][pind];
		}
	}
}

/*
 * Allocate a contiguous set of physical pages of the given size
 * "npages" from the free lists.  All of the physical pages must be at
 * or above the given physical address "low" and below the given
 * physical address "high".  The given value "alignment" determines the
 * alignment of the first physical page in the set.  If the given value
 * "boundary" is non-zero, then the set of physical pages cannot cross
 * any physical address boundary that is a multiple of that value.  Both
 * "alignment" and "boundary" must be a power of two.
 *
 * Returns the first page of the run, or NULL if no suitable run exists
 * in any domain.  The free page queue mutex must be held.
 */
vm_page_t
vm_phys_alloc_contig(u_long npages, vm_paddr_t low, vm_paddr_t high,
    u_long alignment, vm_paddr_t boundary)
{
	struct vm_freelist *fl;
	struct vm_phys_seg *seg;
	vm_paddr_t pa, pa_last, size;
	vm_page_t m, m_ret;
	u_long npages_end;
	int dom, domain, flind, oind, order, pind;

	mtx_assert(&vm_page_queue_free_mtx, MA_OWNED);
	size = npages << PAGE_SHIFT;
	KASSERT(size != 0,
	    ("vm_phys_alloc_contig: size must not be 0"));
	KASSERT((alignment & (alignment - 1)) == 0,
	    ("vm_phys_alloc_contig: alignment must be a power of 2"));
	KASSERT((boundary & (boundary - 1)) == 0,
	    ("vm_phys_alloc_contig: boundary must be a power of 2"));
	/* Compute the queue that is the best fit for npages. */
	for (order = 0; (1 << order) < npages; order++);
	/* Try each domain in round-robin order until one satisfies us. */
	dom = 0;
restartdom:
	domain = vm_rr_selectdomain();
	for (flind = 0; flind < vm_nfreelists; flind++) {
		/* Only blocks of at least "order" pages can hold the run. */
		for (oind = min(order, VM_NFREEORDER - 1); oind < VM_NFREEORDER; oind++) {
			for (pind = 0; pind < VM_NFREEPOOL; pind++) {
				fl = &vm_phys_free_queues[domain][flind][pind][0];
				TAILQ_FOREACH(m_ret, &fl[oind].pl, plinks.q) {
					/*
					 * A free list may contain physical pages
					 * from one or more segments.
					 */
					seg = &vm_phys_segs[m_ret->segind];
					if (seg->start > high ||
					    low >= seg->end)
						continue;

					/*
					 * Is the size of this allocation request
					 * larger than the largest block size?
					 */
					if (order >= VM_NFREEORDER) {
						/*
						 * Determine if a sufficient number
						 * of subsequent blocks to satisfy
						 * the allocation request are free.
						 */
						pa = VM_PAGE_TO_PHYS(m_ret);
						pa_last = pa + size;
						for (;;) {
							/* Step by one maximum-order block. */
							pa += 1 << (PAGE_SHIFT + VM_NFREEORDER - 1);
							if (pa >= pa_last)
								break;
							if (pa < seg->start ||
							    pa >= seg->end)
								break;
							m = &seg->first_page[atop(pa - seg->start)];
							if (m->order != VM_NFREEORDER - 1)
								break;
						}
						/* If not, continue to the next block. */
						if (pa < pa_last)
							continue;
					}

					/*
					 * Determine if the blocks are within the given range,
					 * satisfy the given alignment, and do not cross the
					 * given boundary.
					 */
					pa = VM_PAGE_TO_PHYS(m_ret);
					if (pa >= low &&
					    pa + size <= high &&
					    (pa & (alignment - 1)) == 0 &&
					    ((pa ^ (pa + size - 1)) & ~(boundary - 1)) == 0)
						goto done;
				}
			}
		}
	}
	if (++dom < vm_ndomains)
		goto restartdom;
	return (NULL);
done:
	/*
	 * Remove every max-order-or-smaller block making up the run
	 * from the free lists.  "seg" and "oind" are still valid from
	 * the search above.
	 */
	for (m = m_ret; m < &m_ret[npages]; m = &m[1 << oind]) {
		fl = (*seg->free_queues)[m->pool];
		vm_freelist_rem(fl, m, m->order);
	}
	/* Contiguous allocations are always served from the default pool. */
	if (m_ret->pool != VM_FREEPOOL_DEFAULT)
		vm_phys_set_pool(VM_FREEPOOL_DEFAULT, m_ret, oind);
	fl = (*seg->free_queues)[m_ret->pool];
	vm_phys_split_pages(m_ret, oind, fl, order);
	/* Return excess pages to the free lists. */
	npages_end = roundup2(npages, 1 << imin(oind, order));
	if (npages < npages_end)
		vm_phys_free_contig(&m_ret[npages], npages_end - npages);
	return (m_ret);
}

#ifdef DDB
/*
 * Show the number of physical pages in each of the free lists.
 * One table is printed per domain and free list; rows are buddy
 * orders (largest first), columns are pools.
 */
DB_SHOW_COMMAND(freepages, db_show_freepages)
{
	struct vm_freelist *fl;
	int flind, oind, pind, dom;

	for (dom = 0; dom < vm_ndomains; dom++) {
		db_printf("DOMAIN: %d\n", dom);
		for (flind = 0; flind < vm_nfreelists; flind++) {
			db_printf("FREE LIST %d:\n"
			    "\n  ORDER (SIZE)  |  NUMBER"
			    "\n              ", flind);
			for (pind = 0; pind < VM_NFREEPOOL; pind++)
				db_printf("  |  POOL %d", pind);
			db_printf("\n--            ");
			for (pind = 0; pind < VM_NFREEPOOL; pind++)
				db_printf("-- --      ");
			db_printf("--\n");
			for (oind = VM_NFREEORDER - 1; oind >= 0; oind--) {
				db_printf("  %2.2d (%6.6dK)", oind,
				    1 << (PAGE_SHIFT - 10 + oind));
				for (pind = 0; pind < VM_NFREEPOOL; pind++) {
					fl = vm_phys_free_queues[dom][flind][pind];
					db_printf("  |  %6.6d", fl[oind].lcnt);
				}
				db_printf("\n");
			}
			db_printf("\n");
		}
		db_printf("\n");
	}
}
#endif