/*-
 * Copyright (c) 2002-2006 Rice University
 * Copyright (c) 2007 Alan L. Cox <alc@cs.rice.edu>
 * All rights reserved.
 *
 * This software was developed for the FreeBSD Project by Alan L. Cox,
 * Olivier Crameri, Peter Druschel, Sitaram Iyer, and Juan Navarro.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE COPYRIGHT
 * HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS
 * OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY
 * WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*
 *	Physical memory system implementation
 *
 * Any external functions defined by this module are only to be used by the
 * virtual memory system.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD: stable/10/sys/vm/vm_phys.c 308349 2016-11-05 20:14:23Z markj $");

#include "opt_ddb.h"
#include "opt_vm.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/lock.h>
#include <sys/kernel.h>
#include <sys/malloc.h>
#include <sys/mutex.h>
#if MAXMEMDOM > 1
#include <sys/proc.h>
#endif
#include <sys/queue.h>
#include <sys/sbuf.h>
#include <sys/sysctl.h>
#include <sys/vmmeter.h>

#include <ddb/ddb.h>

#include <vm/vm.h>
#include <vm/vm_param.h>
#include <vm/vm_kern.h>
#include <vm/vm_object.h>
#include <vm/vm_page.h>
#include <vm/vm_phys.h>

_Static_assert(sizeof(long) * NBBY >= VM_PHYSSEG_MAX,
    "Too many physsegs.");

struct mem_affinity *mem_affinity;

int vm_ndomains = 1;

struct vm_phys_seg vm_phys_segs[VM_PHYSSEG_MAX];
int vm_phys_nsegs;

#define VM_PHYS_FICTITIOUS_NSEGS	8
static struct vm_phys_fictitious_seg {
	vm_paddr_t	start;
	vm_paddr_t	end;
	vm_page_t	first_page;
} vm_phys_fictitious_segs[VM_PHYS_FICTITIOUS_NSEGS];
static struct mtx vm_phys_fictitious_reg_mtx;
MALLOC_DEFINE(M_FICT_PAGES, "vm_fictitious", "Fictitious VM pages");

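/*
 * The buddy allocator's free queues, indexed by memory domain, free list,
 * free pool, and order.  Each entry heads a queue of 2^order page blocks.
 */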
static struct vm_freelist
    vm_phys_free_queues[MAXMEMDOM][VM_NFREELIST][VM_NFREEPOOL][VM_NFREEORDER];

static int vm_nfreelists;

/*
 * Provides the mapping from VM_FREELIST_* to free list indices (flind).
 */
static int vm_freelist_to_flind[VM_NFREELIST];

CTASSERT(VM_FREELIST_DEFAULT == 0);

#ifdef VM_FREELIST_ISADMA
#define	VM_ISADMA_BOUNDARY	16777216
#endif
#ifdef VM_FREELIST_DMA32
#define	VM_DMA32_BOUNDARY	((vm_paddr_t)1 << 32)
#endif

/*
 * Enforce the assumptions made by vm_phys_add_seg() and vm_phys_init() about
 * the ordering of the free list boundaries.
 */
#if defined(VM_ISADMA_BOUNDARY) && defined(VM_LOWMEM_BOUNDARY)
CTASSERT(VM_ISADMA_BOUNDARY < VM_LOWMEM_BOUNDARY);
#endif
#if defined(VM_LOWMEM_BOUNDARY) && defined(VM_DMA32_BOUNDARY)
CTASSERT(VM_LOWMEM_BOUNDARY < VM_DMA32_BOUNDARY);
#endif

static int cnt_prezero;
SYSCTL_INT(_vm_stats_misc, OID_AUTO, cnt_prezero, CTLFLAG_RD,
    &cnt_prezero, 0, "The number of physical pages prezeroed at idle time");

static int sysctl_vm_phys_free(SYSCTL_HANDLER_ARGS);
SYSCTL_OID(_vm, OID_AUTO, phys_free, CTLTYPE_STRING | CTLFLAG_RD,
    NULL, 0, sysctl_vm_phys_free, "A", "Phys Free Info");

static int sysctl_vm_phys_segs(SYSCTL_HANDLER_ARGS);
SYSCTL_OID(_vm, OID_AUTO, phys_segs, CTLTYPE_STRING | CTLFLAG_RD,
    NULL, 0, sysctl_vm_phys_segs, "A", "Phys Seg Info");

SYSCTL_INT(_vm, OID_AUTO, ndomains, CTLFLAG_RD,
    &vm_ndomains, 0, "Number of physical memory domains available.");

static vm_page_t vm_phys_alloc_domain_pages(int domain, int flind, int pool,
    int order);
static void _vm_phys_create_seg(vm_paddr_t start, vm_paddr_t end, int domain);
static void vm_phys_create_seg(vm_paddr_t start, vm_paddr_t end);
static int vm_phys_paddr_to_segind(vm_paddr_t pa);
static void vm_phys_split_pages(vm_page_t m, int oind, struct vm_freelist *fl,
    int order);

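/*
 * Select a memory domain in round-robin fashion, advancing a per-thread
 * cursor on each call.  With a single domain this always returns 0.
 */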
static __inline int
vm_rr_selectdomain(void)
{
#if MAXMEMDOM > 1
	struct thread *td;

	td = curthread;

	td->td_dom_rr_idx++;
	td->td_dom_rr_idx %= vm_ndomains;
	return (td->td_dom_rr_idx);
#else
	return (0);
#endif
}

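/*
 * Return TRUE if any physical memory segment whose index bit is set in
 * "mask" overlaps the physical address range [low, high).
 */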
boolean_t
vm_phys_domain_intersects(long mask, vm_paddr_t low, vm_paddr_t high)
{
	struct vm_phys_seg *s;
	int idx;

	while ((idx = ffsl(mask)) != 0) {
		idx--;	/* ffsl counts from 1 */
		mask &= ~(1UL << idx);
		s = &vm_phys_segs[idx];
		if (low < s->end && high > s->start)
			return (TRUE);
	}
	return (FALSE);
}

/*
 * Outputs the state of the physical memory allocator, specifically,
 * the amount of physical memory in each free list.
 */
static int
sysctl_vm_phys_free(SYSCTL_HANDLER_ARGS)
{
	struct sbuf sbuf;
	struct vm_freelist *fl;
	int dom, error, flind, oind, pind;

	error = sysctl_wire_old_buffer(req, 0);
	if (error != 0)
		return (error);
	sbuf_new_for_sysctl(&sbuf, NULL, 128 * vm_ndomains, req);
	for (dom = 0; dom < vm_ndomains; dom++) {
		sbuf_printf(&sbuf, "\nDOMAIN %d:\n", dom);
		for (flind = 0; flind < vm_nfreelists; flind++) {
			sbuf_printf(&sbuf, "\nFREE LIST %d:\n"
			    "\n  ORDER (SIZE)  |  NUMBER"
			    "\n              ", flind);
			for (pind = 0; pind < VM_NFREEPOOL; pind++)
				sbuf_printf(&sbuf, "  |  POOL %d", pind);
			sbuf_printf(&sbuf, "\n--            ");
			for (pind = 0; pind < VM_NFREEPOOL; pind++)
				sbuf_printf(&sbuf, "-- --      ");
			sbuf_printf(&sbuf, "--\n");
			for (oind = VM_NFREEORDER - 1; oind >= 0; oind--) {
				sbuf_printf(&sbuf, "  %2d (%6dK)", oind,
				    1 << (PAGE_SHIFT - 10 + oind));
				for (pind = 0; pind < VM_NFREEPOOL; pind++) {
					fl = vm_phys_free_queues[dom][flind][pind];
					sbuf_printf(&sbuf, "  |  %6d",
					    fl[oind].lcnt);
				}
				sbuf_printf(&sbuf, "\n");
			}
		}
	}
	error = sbuf_finish(&sbuf);
	sbuf_delete(&sbuf);
	return (error);
}

/*
 * Outputs the set of physical memory segments.
 */
static int
sysctl_vm_phys_segs(SYSCTL_HANDLER_ARGS)
{
	struct sbuf sbuf;
	struct vm_phys_seg *seg;
	int error, segind;

	error = sysctl_wire_old_buffer(req, 0);
	if (error != 0)
		return (error);
	sbuf_new_for_sysctl(&sbuf, NULL, 128, req);
	for (segind = 0; segind < vm_phys_nsegs; segind++) {
		sbuf_printf(&sbuf, "\nSEGMENT %d:\n\n", segind);
		seg = &vm_phys_segs[segind];
		sbuf_printf(&sbuf, "start:     %#jx\n",
		    (uintmax_t)seg->start);
		sbuf_printf(&sbuf, "end:       %#jx\n",
		    (uintmax_t)seg->end);
		sbuf_printf(&sbuf, "domain:    %d\n", seg->domain);
		sbuf_printf(&sbuf, "free list: %p\n", seg->free_queues);
	}
	error = sbuf_finish(&sbuf);
	sbuf_delete(&sbuf);
	return (error);
}

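/*
 * Add the 2^order page block headed by "m" to the given free list, at the
 * tail if "tail" is non-zero and at the head otherwise.
 */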
static void
vm_freelist_add(struct vm_freelist *fl, vm_page_t m, int order, int tail)
{

	m->order = order;
	if (tail)
		TAILQ_INSERT_TAIL(&fl[order].pl, m, plinks.q);
	else
		TAILQ_INSERT_HEAD(&fl[order].pl, m, plinks.q);
	fl[order].lcnt++;
}

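/*
 * Remove the 2^order page block headed by "m" from the given free list and
 * mark the block as no longer free.
 */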
static void
vm_freelist_rem(struct vm_freelist *fl, vm_page_t m, int order)
{

	TAILQ_REMOVE(&fl[order].pl, m, plinks.q);
	fl[order].lcnt--;
	m->order = VM_NFREEORDER;
}

/*
 * Create a physical memory segment.
 */
static void
_vm_phys_create_seg(vm_paddr_t start, vm_paddr_t end, int domain)
{
	struct vm_phys_seg *seg;

	KASSERT(vm_phys_nsegs < VM_PHYSSEG_MAX,
	    ("vm_phys_create_seg: increase VM_PHYSSEG_MAX"));
	KASSERT(domain < vm_ndomains,
	    ("vm_phys_create_seg: invalid domain provided"));
	seg = &vm_phys_segs[vm_phys_nsegs++];
	while (seg > vm_phys_segs && (seg - 1)->start >= end) {
		*seg = *(seg - 1);
		seg--;
	}
	seg->start = start;
	seg->end = end;
	seg->domain = domain;
}

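/*
 * Create physical memory segments covering [start, end), splitting the
 * range at memory affinity (domain) boundaries when affinity information
 * is available.
 */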
static void
vm_phys_create_seg(vm_paddr_t start, vm_paddr_t end)
{
	int i;

	if (mem_affinity == NULL) {
		_vm_phys_create_seg(start, end, 0);
		return;
	}

	for (i = 0;; i++) {
		if (mem_affinity[i].end == 0)
			panic("Reached end of affinity info");
		if (mem_affinity[i].end <= start)
			continue;
		if (mem_affinity[i].start > start)
			panic("No affinity info for start %jx",
			    (uintmax_t)start);
		if (mem_affinity[i].end >= end) {
			_vm_phys_create_seg(start, end,
			    mem_affinity[i].domain);
			break;
		}
		_vm_phys_create_seg(start, mem_affinity[i].end,
		    mem_affinity[i].domain);
		start = mem_affinity[i].end;
	}
}

/*
 * Add a physical memory segment.
 */
void
vm_phys_add_seg(vm_paddr_t start, vm_paddr_t end)
{
	vm_paddr_t paddr;

	KASSERT((start & PAGE_MASK) == 0,
	    ("vm_phys_define_seg: start is not page aligned"));
	KASSERT((end & PAGE_MASK) == 0,
	    ("vm_phys_define_seg: end is not page aligned"));

	/*
	 * Split the physical memory segment if it spans two or more free
	 * list boundaries.
	 */
	paddr = start;
#ifdef	VM_FREELIST_ISADMA
	if (paddr < VM_ISADMA_BOUNDARY && end > VM_ISADMA_BOUNDARY) {
		vm_phys_create_seg(paddr, VM_ISADMA_BOUNDARY);
		paddr = VM_ISADMA_BOUNDARY;
	}
#endif
#ifdef	VM_FREELIST_LOWMEM
	if (paddr < VM_LOWMEM_BOUNDARY && end > VM_LOWMEM_BOUNDARY) {
		vm_phys_create_seg(paddr, VM_LOWMEM_BOUNDARY);
		paddr = VM_LOWMEM_BOUNDARY;
	}
#endif
#ifdef	VM_FREELIST_DMA32
	if (paddr < VM_DMA32_BOUNDARY && end > VM_DMA32_BOUNDARY) {
		vm_phys_create_seg(paddr, VM_DMA32_BOUNDARY);
		paddr = VM_DMA32_BOUNDARY;
	}
#endif
	vm_phys_create_seg(paddr, end);
}

/*
 * Initialize the physical memory allocator.
 *
 * Requires that vm_page_array is initialized!
 */
void
vm_phys_init(void)
{
	struct vm_freelist *fl;
	struct vm_phys_seg *seg;
	u_long npages;
	int dom, flind, freelist, oind, pind, segind;

	/*
	 * Compute the number of free lists, and generate the mapping from the
	 * manifest constants VM_FREELIST_* to the free list indices.
	 *
	 * Initially, the entries of vm_freelist_to_flind[] are set to either
	 * 0 or 1 to indicate which free lists should be created.
	 */
	npages = 0;
	for (segind = vm_phys_nsegs - 1; segind >= 0; segind--) {
		seg = &vm_phys_segs[segind];
#ifdef	VM_FREELIST_ISADMA
		if (seg->end <= VM_ISADMA_BOUNDARY)
			vm_freelist_to_flind[VM_FREELIST_ISADMA] = 1;
		else
#endif
#ifdef	VM_FREELIST_LOWMEM
		if (seg->end <= VM_LOWMEM_BOUNDARY)
			vm_freelist_to_flind[VM_FREELIST_LOWMEM] = 1;
		else
#endif
#ifdef	VM_FREELIST_DMA32
		if (
#ifdef	VM_DMA32_NPAGES_THRESHOLD
		    /*
		     * Create the DMA32 free list only if the amount of
		     * physical memory above physical address 4G exceeds the
		     * given threshold.
		     */
		    npages > VM_DMA32_NPAGES_THRESHOLD &&
#endif
		    seg->end <= VM_DMA32_BOUNDARY)
			vm_freelist_to_flind[VM_FREELIST_DMA32] = 1;
		else
#endif
		{
			npages += atop(seg->end - seg->start);
			vm_freelist_to_flind[VM_FREELIST_DEFAULT] = 1;
		}
	}
	/* Change each entry into a running total of the free lists. */
	for (freelist = 1; freelist < VM_NFREELIST; freelist++) {
		vm_freelist_to_flind[freelist] +=
		    vm_freelist_to_flind[freelist - 1];
	}
	vm_nfreelists = vm_freelist_to_flind[VM_NFREELIST - 1];
	KASSERT(vm_nfreelists > 0, ("vm_phys_init: no free lists"));
	/* Change each entry into a free list index. */
	for (freelist = 0; freelist < VM_NFREELIST; freelist++)
		vm_freelist_to_flind[freelist]--;

	/*
	 * Initialize the first_page and free_queues fields of each physical
	 * memory segment.
	 */
#ifdef VM_PHYSSEG_SPARSE
	npages = 0;
#endif
	for (segind = 0; segind < vm_phys_nsegs; segind++) {
		seg = &vm_phys_segs[segind];
#ifdef VM_PHYSSEG_SPARSE
		seg->first_page = &vm_page_array[npages];
		npages += atop(seg->end - seg->start);
#else
		seg->first_page = PHYS_TO_VM_PAGE(seg->start);
#endif
#ifdef	VM_FREELIST_ISADMA
		if (seg->end <= VM_ISADMA_BOUNDARY) {
			flind = vm_freelist_to_flind[VM_FREELIST_ISADMA];
			KASSERT(flind >= 0,
			    ("vm_phys_init: ISADMA flind < 0"));
		} else
#endif
#ifdef	VM_FREELIST_LOWMEM
		if (seg->end <= VM_LOWMEM_BOUNDARY) {
			flind = vm_freelist_to_flind[VM_FREELIST_LOWMEM];
			KASSERT(flind >= 0,
			    ("vm_phys_init: LOWMEM flind < 0"));
		} else
#endif
#ifdef	VM_FREELIST_DMA32
		if (seg->end <= VM_DMA32_BOUNDARY) {
			flind = vm_freelist_to_flind[VM_FREELIST_DMA32];
			KASSERT(flind >= 0,
			    ("vm_phys_init: DMA32 flind < 0"));
		} else
#endif
		{
			flind = vm_freelist_to_flind[VM_FREELIST_DEFAULT];
			KASSERT(flind >= 0,
			    ("vm_phys_init: DEFAULT flind < 0"));
		}
		seg->free_queues = &vm_phys_free_queues[seg->domain][flind];
	}

	/*
	 * Initialize the free queues.
	 */
	for (dom = 0; dom < vm_ndomains; dom++) {
		for (flind = 0; flind < vm_nfreelists; flind++) {
			for (pind = 0; pind < VM_NFREEPOOL; pind++) {
				fl = vm_phys_free_queues[dom][flind][pind];
				for (oind = 0; oind < VM_NFREEORDER; oind++)
					TAILQ_INIT(&fl[oind].pl);
			}
		}
	}
	mtx_init(&vm_phys_fictitious_reg_mtx, "vmfctr", NULL, MTX_DEF);
}

/*
 * Split a contiguous, power of two-sized set of physical pages.  The block
 * of 2^oind pages headed by "m" is repeatedly halved, with the unused upper
 * buddy of each halving returned to the free list "fl", until the block has
 * been reduced to 2^order pages.
 */
static __inline void
vm_phys_split_pages(vm_page_t m, int oind, struct vm_freelist *fl, int order)
{
	vm_page_t m_buddy;

	while (oind > order) {
		oind--;
		m_buddy = &m[1 << oind];
		KASSERT(m_buddy->order == VM_NFREEORDER,
		    ("vm_phys_split_pages: page %p has unexpected order %d",
		    m_buddy, m_buddy->order));
		vm_freelist_add(fl, m_buddy, oind, 0);
	}
}

/*
 * Initialize a physical page and add it to the free lists.
 */
void
vm_phys_add_page(vm_paddr_t pa)
{
	vm_page_t m;
	struct vm_domain *vmd;

	cnt.v_page_count++;
	m = vm_phys_paddr_to_vm_page(pa);
	m->busy_lock = VPB_UNBUSIED;
	m->phys_addr = pa;
	m->queue = PQ_NONE;
	m->segind = vm_phys_paddr_to_segind(pa);
	vmd = vm_phys_domain(m);
	vmd->vmd_page_count++;
	vmd->vmd_segs |= 1UL << m->segind;
	m->flags = PG_FREE;
	KASSERT(m->order == VM_NFREEORDER,
	    ("vm_phys_add_page: page %p has unexpected order %d",
	    m, m->order));
	m->pool = VM_FREEPOOL_DEFAULT;
	pmap_page_init(m);
	mtx_lock(&vm_page_queue_free_mtx);
	vm_phys_freecnt_adj(m, 1);
	vm_phys_free_pages(m, 0);
	mtx_unlock(&vm_page_queue_free_mtx);
}

/*
 * Allocate a contiguous, power of two-sized set of physical pages
 * from the free lists.
 *
 * The free page queues must be locked.
 */
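/*
 * Purely for illustration (this sketch is not an actual caller in this
 * file): allocating a single order-2 (four-page) block from the default
 * pool would look like
 *
 *	mtx_lock(&vm_page_queue_free_mtx);
 *	m = vm_phys_alloc_pages(VM_FREEPOOL_DEFAULT, 2);
 *	mtx_unlock(&vm_page_queue_free_mtx);
 *
 * Real callers typically hold the lock as part of a larger operation.
 */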
vm_page_t
vm_phys_alloc_pages(int pool, int order)
{
	vm_page_t m;
	int dom, domain, flind;

	KASSERT(pool < VM_NFREEPOOL,
	    ("vm_phys_alloc_pages: pool %d is out of range", pool));
	KASSERT(order < VM_NFREEORDER,
	    ("vm_phys_alloc_pages: order %d is out of range", order));

	for (dom = 0; dom < vm_ndomains; dom++) {
		domain = vm_rr_selectdomain();
		for (flind = 0; flind < vm_nfreelists; flind++) {
			m = vm_phys_alloc_domain_pages(domain, flind, pool,
			    order);
			if (m != NULL)
				return (m);
		}
	}
	return (NULL);
}

/*
 * Allocate a contiguous, power of two-sized set of physical pages from the
 * specified free list.  The free list must be specified using one of the
 * manifest constants VM_FREELIST_*.
 *
 * The free page queues must be locked.
 */
vm_page_t
vm_phys_alloc_freelist_pages(int freelist, int pool, int order)
{
	vm_page_t m;
	int dom, domain;

	KASSERT(freelist < VM_NFREELIST,
	    ("vm_phys_alloc_freelist_pages: freelist %d is out of range",
	    freelist));
	KASSERT(pool < VM_NFREEPOOL,
	    ("vm_phys_alloc_freelist_pages: pool %d is out of range", pool));
	KASSERT(order < VM_NFREEORDER,
	    ("vm_phys_alloc_freelist_pages: order %d is out of range", order));
	for (dom = 0; dom < vm_ndomains; dom++) {
		domain = vm_rr_selectdomain();
		m = vm_phys_alloc_domain_pages(domain,
		    vm_freelist_to_flind[freelist], pool, order);
		if (m != NULL)
			return (m);
	}
	return (NULL);
}

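/*
 * Allocate a 2^order page block from the given domain, free list, and
 * pool.  If the requested pool has no block large enough, transfer the
 * largest available block from another pool, reassigning it to the
 * requested pool and splitting off the excess.
 */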
static vm_page_t
vm_phys_alloc_domain_pages(int domain, int flind, int pool, int order)
{
	struct vm_freelist *fl;
	struct vm_freelist *alt;
	int oind, pind;
	vm_page_t m;

	mtx_assert(&vm_page_queue_free_mtx, MA_OWNED);
	fl = &vm_phys_free_queues[domain][flind][pool][0];
	for (oind = order; oind < VM_NFREEORDER; oind++) {
		m = TAILQ_FIRST(&fl[oind].pl);
		if (m != NULL) {
			vm_freelist_rem(fl, m, oind);
			vm_phys_split_pages(m, oind, fl, order);
			return (m);
		}
	}

	/*
	 * The given pool was empty.  Find the largest contiguous,
	 * power-of-two-sized set of pages in any pool.  Transfer these
	 * pages to the given pool, and use them to satisfy the allocation.
	 */
	for (oind = VM_NFREEORDER - 1; oind >= order; oind--) {
		for (pind = 0; pind < VM_NFREEPOOL; pind++) {
			alt = &vm_phys_free_queues[domain][flind][pind][0];
			m = TAILQ_FIRST(&alt[oind].pl);
			if (m != NULL) {
				vm_freelist_rem(alt, m, oind);
				vm_phys_set_pool(pool, m, oind);
				vm_phys_split_pages(m, oind, fl, order);
				return (m);
			}
		}
	}
	return (NULL);
}

/*
 * Find the vm_page corresponding to the given physical address.
 */
vm_page_t
vm_phys_paddr_to_vm_page(vm_paddr_t pa)
{
	struct vm_phys_seg *seg;
	int segind;

	for (segind = 0; segind < vm_phys_nsegs; segind++) {
		seg = &vm_phys_segs[segind];
		if (pa >= seg->start && pa < seg->end)
			return (&seg->first_page[atop(pa - seg->start)]);
	}
	return (NULL);
}

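/*
 * Find the fictitious vm_page corresponding to the given physical address,
 * or return NULL if the address lies within no registered fictitious range.
 */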
vm_page_t
vm_phys_fictitious_to_vm_page(vm_paddr_t pa)
{
	struct vm_phys_fictitious_seg *seg;
	vm_page_t m;
	int segind;

	m = NULL;
	for (segind = 0; segind < VM_PHYS_FICTITIOUS_NSEGS; segind++) {
		seg = &vm_phys_fictitious_segs[segind];
		if (pa >= seg->start && pa < seg->end) {
			m = &seg->first_page[atop(pa - seg->start)];
			KASSERT((m->flags & PG_FICTITIOUS) != 0,
			    ("%p not fictitious", m));
			break;
		}
	}
	return (m);
}

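/*
 * Register the physical address range [start, end) as a fictitious memory
 * segment, backing it with fake vm_page structures.  Returns 0 on success,
 * EINVAL if the range extends past the end of vm_page_array in the dense
 * case, or EBUSY if no segment slot is available.
 */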
int
vm_phys_fictitious_reg_range(vm_paddr_t start, vm_paddr_t end,
    vm_memattr_t memattr)
{
	struct vm_phys_fictitious_seg *seg;
	vm_page_t fp;
	long i, page_count;
	int segind;
#ifdef VM_PHYSSEG_DENSE
	long pi;
	boolean_t malloced;
#endif

	page_count = (end - start) / PAGE_SIZE;

#ifdef VM_PHYSSEG_DENSE
	pi = atop(start);
	if (pi >= first_page && pi < vm_page_array_size + first_page) {
		if (atop(end) >= vm_page_array_size + first_page)
			return (EINVAL);
		fp = &vm_page_array[pi - first_page];
		malloced = FALSE;
	} else
#endif
	{
		fp = malloc(page_count * sizeof(struct vm_page), M_FICT_PAGES,
		    M_WAITOK | M_ZERO);
#ifdef VM_PHYSSEG_DENSE
		malloced = TRUE;
#endif
	}
	for (i = 0; i < page_count; i++) {
		vm_page_initfake(&fp[i], start + PAGE_SIZE * i, memattr);
		fp[i].oflags &= ~VPO_UNMANAGED;
		fp[i].busy_lock = VPB_UNBUSIED;
	}
	mtx_lock(&vm_phys_fictitious_reg_mtx);
	for (segind = 0; segind < VM_PHYS_FICTITIOUS_NSEGS; segind++) {
		seg = &vm_phys_fictitious_segs[segind];
		if (seg->start == 0 && seg->end == 0) {
			seg->start = start;
			seg->end = end;
			seg->first_page = fp;
			mtx_unlock(&vm_phys_fictitious_reg_mtx);
			return (0);
		}
	}
	mtx_unlock(&vm_phys_fictitious_reg_mtx);
#ifdef VM_PHYSSEG_DENSE
	if (malloced)
#endif
		free(fp, M_FICT_PAGES);
	return (EBUSY);
}

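/*
 * Unregister the fictitious segment exactly matching [start, end), freeing
 * its fake vm_page structures if they were separately allocated.  A request
 * for an unregistered range is caught by the trailing assertion.
 */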
void
vm_phys_fictitious_unreg_range(vm_paddr_t start, vm_paddr_t end)
{
	struct vm_phys_fictitious_seg *seg;
	vm_page_t fp;
	int segind;
#ifdef VM_PHYSSEG_DENSE
	long pi;
#endif

#ifdef VM_PHYSSEG_DENSE
	pi = atop(start);
#endif

	mtx_lock(&vm_phys_fictitious_reg_mtx);
	for (segind = 0; segind < VM_PHYS_FICTITIOUS_NSEGS; segind++) {
		seg = &vm_phys_fictitious_segs[segind];
		if (seg->start == start && seg->end == end) {
			seg->start = seg->end = 0;
			fp = seg->first_page;
			seg->first_page = NULL;
			mtx_unlock(&vm_phys_fictitious_reg_mtx);
#ifdef VM_PHYSSEG_DENSE
			if (pi < first_page || atop(end) >= vm_page_array_size)
#endif
				free(fp, M_FICT_PAGES);
			return;
		}
	}
	mtx_unlock(&vm_phys_fictitious_reg_mtx);
	KASSERT(0, ("Unregistering not registered fictitious range"));
}

/*
 * Find the segment containing the given physical address.
 */
static int
vm_phys_paddr_to_segind(vm_paddr_t pa)
{
	struct vm_phys_seg *seg;
	int segind;

	for (segind = 0; segind < vm_phys_nsegs; segind++) {
		seg = &vm_phys_segs[segind];
		if (pa >= seg->start && pa < seg->end)
			return (segind);
	}
	panic("vm_phys_paddr_to_segind: paddr %#jx is not in any segment",
	    (uintmax_t)pa);
}

/*
 * Free a contiguous, power of two-sized set of physical pages.
 *
 * The free page queues must be locked.
 */
void
vm_phys_free_pages(vm_page_t m, int order)
{
	struct vm_freelist *fl;
	struct vm_phys_seg *seg;
	vm_paddr_t pa;
	vm_page_t m_buddy;

	KASSERT(m->order == VM_NFREEORDER,
	    ("vm_phys_free_pages: page %p has unexpected order %d",
	    m, m->order));
	KASSERT(m->pool < VM_NFREEPOOL,
	    ("vm_phys_free_pages: page %p has unexpected pool %d",
	    m, m->pool));
	KASSERT(order < VM_NFREEORDER,
	    ("vm_phys_free_pages: order %d is out of range", order));
	mtx_assert(&vm_page_queue_free_mtx, MA_OWNED);
	seg = &vm_phys_segs[m->segind];
	if (order < VM_NFREEORDER - 1) {
		pa = VM_PAGE_TO_PHYS(m);
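		/*
		 * A block's buddy differs from the block only in bit
		 * (PAGE_SHIFT + order) of its physical address, so the XOR
		 * below yields the buddy's address.  Coalesce with the buddy
		 * for as long as it is free, of matching order, and within
		 * the same segment.
		 */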
		do {
			pa ^= ((vm_paddr_t)1 << (PAGE_SHIFT + order));
			if (pa < seg->start || pa >= seg->end)
				break;
			m_buddy = &seg->first_page[atop(pa - seg->start)];
			if (m_buddy->order != order)
				break;
			fl = (*seg->free_queues)[m_buddy->pool];
			vm_freelist_rem(fl, m_buddy, order);
			if (m_buddy->pool != m->pool)
				vm_phys_set_pool(m->pool, m_buddy, order);
			order++;
			pa &= ~(((vm_paddr_t)1 << (PAGE_SHIFT + order)) - 1);
			m = &seg->first_page[atop(pa - seg->start)];
		} while (order < VM_NFREEORDER - 1);
	}
	fl = (*seg->free_queues)[m->pool];
	vm_freelist_add(fl, m, order, 1);
}

/*
 * Free a contiguous, arbitrarily sized set of physical pages.
 *
 * The free page queues must be locked.
 */
void
vm_phys_free_contig(vm_page_t m, u_long npages)
{
	u_int n;
	int order;

	/*
	 * Avoid unnecessary coalescing by freeing the pages in the largest
	 * possible power-of-two-sized subsets.
	 */
	mtx_assert(&vm_page_queue_free_mtx, MA_OWNED);
	for (;; npages -= n) {
		/*
		 * Unsigned "min" is used here so that "order" is assigned
		 * "VM_NFREEORDER - 1" when "m"'s physical address is zero
		 * or the low-order bits of its physical address are zero
		 * because the size of a physical address exceeds the size of
		 * a long.
		 */
		order = min(ffsl(VM_PAGE_TO_PHYS(m) >> PAGE_SHIFT) - 1,
		    VM_NFREEORDER - 1);
		n = 1 << order;
		if (npages < n)
			break;
		vm_phys_free_pages(m, order);
		m += n;
	}
	/* The residual "npages" is less than "1 << (VM_NFREEORDER - 1)". */
	for (; npages > 0; npages -= n) {
		order = flsl(npages) - 1;
		n = 1 << order;
		vm_phys_free_pages(m, order);
		m += n;
	}
}

/*
 * Set the pool for a contiguous, power of two-sized set of physical pages.
 */
void
vm_phys_set_pool(int pool, vm_page_t m, int order)
{
	vm_page_t m_tmp;

	for (m_tmp = m; m_tmp < &m[1 << order]; m_tmp++)
		m_tmp->pool = pool;
}

/*
 * Search for the given physical page "m" in the free lists.  If the search
 * succeeds, remove "m" from the free lists and return TRUE.  Otherwise, return
 * FALSE, indicating that "m" is not in the free lists.
 *
 * The free page queues must be locked.
 */
boolean_t
vm_phys_unfree_page(vm_page_t m)
{
	struct vm_freelist *fl;
	struct vm_phys_seg *seg;
	vm_paddr_t pa, pa_half;
	vm_page_t m_set, m_tmp;
	int order;

	mtx_assert(&vm_page_queue_free_mtx, MA_OWNED);

	/*
	 * First, find the contiguous, power of two-sized set of free
	 * physical pages containing the given physical page "m" and
	 * assign it to "m_set".
	 */
	seg = &vm_phys_segs[m->segind];
	for (m_set = m, order = 0; m_set->order == VM_NFREEORDER &&
	    order < VM_NFREEORDER - 1; ) {
		order++;
		pa = m->phys_addr & (~(vm_paddr_t)0 << (PAGE_SHIFT + order));
		if (pa >= seg->start)
			m_set = &seg->first_page[atop(pa - seg->start)];
		else
			return (FALSE);
	}
	if (m_set->order < order)
		return (FALSE);
	if (m_set->order == VM_NFREEORDER)
		return (FALSE);
	KASSERT(m_set->order < VM_NFREEORDER,
	    ("vm_phys_unfree_page: page %p has unexpected order %d",
	    m_set, m_set->order));

	/*
	 * Next, remove "m_set" from the free lists.  Finally, extract
	 * "m" from "m_set" using an iterative algorithm: While "m_set"
	 * is larger than a page, shrink "m_set" by returning the half
	 * of "m_set" that does not contain "m" to the free lists.
	 */
	fl = (*seg->free_queues)[m_set->pool];
	order = m_set->order;
	vm_freelist_rem(fl, m_set, order);
	while (order > 0) {
		order--;
		pa_half = m_set->phys_addr ^ (1 << (PAGE_SHIFT + order));
		if (m->phys_addr < pa_half)
			m_tmp = &seg->first_page[atop(pa_half - seg->start)];
		else {
			m_tmp = m_set;
			m_set = &seg->first_page[atop(pa_half - seg->start)];
		}
		vm_freelist_add(fl, m_tmp, order, 0);
	}
	KASSERT(m_set == m, ("vm_phys_unfree_page: fatal inconsistency"));
	return (TRUE);
}

/*
 * Try to zero one physical page.  Used by an idle priority thread.
 *
 * The scan cursor ("flind", "pind", "oind") is static so that successive
 * calls resume the search where the previous call left off.
 */
boolean_t
vm_phys_zero_pages_idle(void)
{
	static struct vm_freelist *fl;
	static int flind, oind, pind;
	vm_page_t m, m_tmp;
	int domain;

	domain = vm_rr_selectdomain();
	fl = vm_phys_free_queues[domain][0][0];
	mtx_assert(&vm_page_queue_free_mtx, MA_OWNED);
	for (;;) {
		TAILQ_FOREACH_REVERSE(m, &fl[oind].pl, pglist, plinks.q) {
			for (m_tmp = m; m_tmp < &m[1 << oind]; m_tmp++) {
				if ((m_tmp->flags & (PG_CACHED | PG_ZERO)) == 0) {
					vm_phys_unfree_page(m_tmp);
					vm_phys_freecnt_adj(m, -1);
					mtx_unlock(&vm_page_queue_free_mtx);
					pmap_zero_page_idle(m_tmp);
					m_tmp->flags |= PG_ZERO;
					mtx_lock(&vm_page_queue_free_mtx);
					vm_phys_freecnt_adj(m, 1);
					vm_phys_free_pages(m_tmp, 0);
					vm_page_zero_count++;
					cnt_prezero++;
					return (TRUE);
				}
			}
		}
		oind++;
		if (oind == VM_NFREEORDER) {
			oind = 0;
			pind++;
			if (pind == VM_NFREEPOOL) {
				pind = 0;
				flind++;
				if (flind == vm_nfreelists)
					flind = 0;
			}
			fl = vm_phys_free_queues[domain][flind][pind];
		}
	}
}

/*
 * Allocate a contiguous set of physical pages of the given size
 * "npages" from the free lists.  All of the physical pages must be at
 * or above the given physical address "low" and below the given
 * physical address "high".  The given value "alignment" determines the
 * alignment of the first physical page in the set.  If the given value
 * "boundary" is non-zero, then the set of physical pages cannot cross
 * any physical address boundary that is a multiple of that value.  Both
 * "alignment" and "boundary" must be a power of two.
 */
vm_page_t
vm_phys_alloc_contig(u_long npages, vm_paddr_t low, vm_paddr_t high,
    u_long alignment, vm_paddr_t boundary)
{
	struct vm_freelist *fl;
	struct vm_phys_seg *seg;
	vm_paddr_t pa, pa_last, size;
	vm_page_t m, m_ret;
	u_long npages_end;
	int dom, domain, flind, oind, order, pind;

	mtx_assert(&vm_page_queue_free_mtx, MA_OWNED);
	size = npages << PAGE_SHIFT;
	KASSERT(size != 0,
	    ("vm_phys_alloc_contig: size must not be 0"));
	KASSERT((alignment & (alignment - 1)) == 0,
	    ("vm_phys_alloc_contig: alignment must be a power of 2"));
	KASSERT((boundary & (boundary - 1)) == 0,
	    ("vm_phys_alloc_contig: boundary must be a power of 2"));
	/* Compute the queue that is the best fit for npages. */
	for (order = 0; (1 << order) < npages; order++);
	dom = 0;
restartdom:
	domain = vm_rr_selectdomain();
	for (flind = 0; flind < vm_nfreelists; flind++) {
		for (oind = min(order, VM_NFREEORDER - 1); oind < VM_NFREEORDER; oind++) {
			for (pind = 0; pind < VM_NFREEPOOL; pind++) {
				fl = &vm_phys_free_queues[domain][flind][pind][0];
				TAILQ_FOREACH(m_ret, &fl[oind].pl, plinks.q) {
					/*
					 * A free list may contain physical pages
					 * from one or more segments.
					 */
					seg = &vm_phys_segs[m_ret->segind];
					if (seg->start > high ||
					    low >= seg->end)
						continue;

					/*
					 * Is the size of this allocation request
					 * larger than the largest block size?
					 */
					if (order >= VM_NFREEORDER) {
						/*
						 * Determine if a sufficient number
						 * of subsequent blocks to satisfy
						 * the allocation request are free.
						 */
						pa = VM_PAGE_TO_PHYS(m_ret);
						pa_last = pa + size;
						for (;;) {
							pa += 1 << (PAGE_SHIFT + VM_NFREEORDER - 1);
							if (pa >= pa_last)
								break;
							if (pa < seg->start ||
							    pa >= seg->end)
								break;
							m = &seg->first_page[atop(pa - seg->start)];
							if (m->order != VM_NFREEORDER - 1)
								break;
						}
						/* If not, continue to the next block. */
						if (pa < pa_last)
							continue;
					}

					/*
					 * Determine if the blocks are within the given range,
					 * satisfy the given alignment, and do not cross the
					 * given boundary.
					 */
					pa = VM_PAGE_TO_PHYS(m_ret);
					if (pa >= low &&
					    pa + size <= high &&
					    (pa & (alignment - 1)) == 0 &&
					    ((pa ^ (pa + size - 1)) & ~(boundary - 1)) == 0)
						goto done;
				}
			}
		}
	}
	if (++dom < vm_ndomains)
		goto restartdom;
	return (NULL);
done:
	for (m = m_ret; m < &m_ret[npages]; m = &m[1 << oind]) {
		fl = (*seg->free_queues)[m->pool];
		vm_freelist_rem(fl, m, m->order);
	}
	if (m_ret->pool != VM_FREEPOOL_DEFAULT)
		vm_phys_set_pool(VM_FREEPOOL_DEFAULT, m_ret, oind);
	fl = (*seg->free_queues)[m_ret->pool];
	vm_phys_split_pages(m_ret, oind, fl, order);
	/* Return excess pages to the free lists. */
	npages_end = roundup2(npages, 1 << imin(oind, order));
	if (npages < npages_end)
		vm_phys_free_contig(&m_ret[npages], npages_end - npages);
	return (m_ret);
}

#ifdef DDB
/*
 * Show the number of physical pages in each of the free lists.
 */
DB_SHOW_COMMAND(freepages, db_show_freepages)
{
	struct vm_freelist *fl;
	int flind, oind, pind, dom;

	for (dom = 0; dom < vm_ndomains; dom++) {
		db_printf("DOMAIN: %d\n", dom);
		for (flind = 0; flind < vm_nfreelists; flind++) {
			db_printf("FREE LIST %d:\n"
			    "\n  ORDER (SIZE)  |  NUMBER"
			    "\n              ", flind);
			for (pind = 0; pind < VM_NFREEPOOL; pind++)
				db_printf("  |  POOL %d", pind);
			db_printf("\n--            ");
			for (pind = 0; pind < VM_NFREEPOOL; pind++)
				db_printf("-- --      ");
			db_printf("--\n");
			for (oind = VM_NFREEORDER - 1; oind >= 0; oind--) {
				db_printf("  %2.2d (%6.6dK)", oind,
				    1 << (PAGE_SHIFT - 10 + oind));
				for (pind = 0; pind < VM_NFREEPOOL; pind++) {
					fl = vm_phys_free_queues[dom][flind][pind];
					db_printf("  |  %6.6d", fl[oind].lcnt);
				}
				db_printf("\n");
			}
			db_printf("\n");
		}
		db_printf("\n");
	}
}
#endif