/*-
 * Copyright (c) 2002-2006 Rice University
 * Copyright (c) 2007-2011 Alan L. Cox <alc@cs.rice.edu>
 * All rights reserved.
 *
 * This software was developed for the FreeBSD Project by Alan L. Cox,
 * Olivier Crameri, Peter Druschel, Sitaram Iyer, and Juan Navarro.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE COPYRIGHT
 * HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS
 * OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY
 * WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*
 *	Superpage reservation management module
 *
 * Any external functions defined by this module are only to be used by the
 * virtual memory system.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD: releng/11.0/sys/vm/vm_reserv.c 292469 2015-12-19 18:42:50Z alc $");

#include "opt_vm.h"

#include <sys/param.h>
#include <sys/kernel.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/mutex.h>
#include <sys/queue.h>
#include <sys/rwlock.h>
#include <sys/sbuf.h>
#include <sys/sysctl.h>
#include <sys/systm.h>

#include <vm/vm.h>
#include <vm/vm_param.h>
#include <vm/vm_object.h>
#include <vm/vm_page.h>
#include <vm/vm_phys.h>
#include <vm/vm_radix.h>
#include <vm/vm_reserv.h>

/*
 * The reservation system supports the speculative allocation of large physical
 * pages ("superpages").  Speculative allocation enables the fully-automatic
 * utilization of superpages by the virtual memory system.  In other words, no
 * programmatic directives are required to use superpages.
 */

#if VM_NRESERVLEVEL > 0

/*
 * The number of small pages that are contained in a level 0 reservation
 */
#define	VM_LEVEL_0_NPAGES	(1 << VM_LEVEL_0_ORDER)

/*
 * The number of bits by which a physical address is shifted to obtain the
 * reservation number
 */
#define	VM_LEVEL_0_SHIFT	(VM_LEVEL_0_ORDER + PAGE_SHIFT)

/*
 * The size of a level 0 reservation in bytes
 */
#define	VM_LEVEL_0_SIZE		(1 << VM_LEVEL_0_SHIFT)

/*
 * Computes the index of the small page underlying the given (object, pindex)
 * within the reservation's array of small pages.
 */
#define	VM_RESERV_INDEX(object, pindex)	\
    (((object)->pg_color + (pindex)) & (VM_LEVEL_0_NPAGES - 1))
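
/*
 * For example, assuming VM_LEVEL_0_ORDER is 9 (as on amd64, where a level 0
 * reservation is a 2MB superpage made up of 512 4KB pages), VM_RESERV_INDEX()
 * simply masks the color-adjusted pindex with 511, yielding the page's slot
 * within its reservation.
 */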

/*
 * The size of a population map entry
 */
typedef	u_long		popmap_t;

/*
 * The number of bits in a population map entry
 */
#define	NBPOPMAP	(NBBY * sizeof(popmap_t))

/*
 * The number of population map entries in a reservation
 */
#define	NPOPMAP		howmany(VM_LEVEL_0_NPAGES, NBPOPMAP)
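
/*
 * Continuing the amd64 example above (an assumption, not a requirement): with
 * a 64-bit popmap_t, NBPOPMAP is 64, so NPOPMAP is howmany(512, 64) == 8
 * words per reservation's population map.
 */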

/*
 * Clear a bit in the population map.
 */
static __inline void
popmap_clear(popmap_t popmap[], int i)
{

	popmap[i / NBPOPMAP] &= ~(1UL << (i % NBPOPMAP));
}

/*
 * Set a bit in the population map.
 */
static __inline void
popmap_set(popmap_t popmap[], int i)
{

	popmap[i / NBPOPMAP] |= 1UL << (i % NBPOPMAP);
}

/*
 * Is a bit in the population map clear?
 */
static __inline boolean_t
popmap_is_clear(popmap_t popmap[], int i)
{

	return ((popmap[i / NBPOPMAP] & (1UL << (i % NBPOPMAP))) == 0);
}

/*
 * Is a bit in the population map set?
 */
static __inline boolean_t
popmap_is_set(popmap_t popmap[], int i)
{

	return ((popmap[i / NBPOPMAP] & (1UL << (i % NBPOPMAP))) != 0);
}

/*
 * The reservation structure
 *
 * A reservation structure is constructed whenever a large physical page is
 * speculatively allocated to an object.  The reservation provides the small
 * physical pages for the range [pindex, pindex + VM_LEVEL_0_NPAGES) of offsets
 * within that object.  The reservation's "popcnt" tracks the number of these
 * small physical pages that are in use at any given time.  When and if the
 * reservation is not fully utilized, it appears in the queue of partially-
 * populated reservations.  The reservation always appears on the containing
 * object's list of reservations.
 *
 * A partially-populated reservation can be broken and reclaimed at any time.
 */
struct vm_reserv {
	TAILQ_ENTRY(vm_reserv) partpopq;
	LIST_ENTRY(vm_reserv) objq;
	vm_object_t	object;			/* containing object */
	vm_pindex_t	pindex;			/* offset within object */
	vm_page_t	pages;			/* first page of a superpage */
	int		popcnt;			/* # of pages in use */
	char		inpartpopq;
	popmap_t	popmap[NPOPMAP];	/* bit vector of used pages */
};

/*
 * The reservation array
 *
 * This array is analogous in function to vm_page_array.  It differs in the
 * respect that it may contain a greater number of useful reservation
 * structures than there are (physical) superpages.  These "invalid"
 * reservation structures exist to trade-off space for time in the
 * implementation of vm_reserv_from_page().  Invalid reservation structures are
 * distinguishable from "valid" reservation structures by inspecting the
 * reservation's "pages" field.  Invalid reservation structures have a NULL
 * "pages" field.
 *
 * vm_reserv_from_page() maps a small (physical) page to an element of this
 * array by computing a physical reservation number from the page's physical
 * address.  The physical reservation number is used as the array index.
 *
 * An "active" reservation is a valid reservation structure that has a non-NULL
 * "object" field and a non-zero "popcnt" field.  In other words, every active
 * reservation belongs to a particular object.  Moreover, every active
 * reservation has an entry in the containing object's list of reservations.
 */
static vm_reserv_t vm_reserv_array;

/*
 * The partially-populated reservation queue
 *
 * This queue enables the fast recovery of an unused cached or free small page
 * from a partially-populated reservation.  The reservation at the head of
 * this queue is the least-recently-changed, partially-populated reservation.
 *
 * Access to this queue is synchronized by the free page queue lock.
 */
static TAILQ_HEAD(, vm_reserv) vm_rvq_partpop =
			    TAILQ_HEAD_INITIALIZER(vm_rvq_partpop);

static SYSCTL_NODE(_vm, OID_AUTO, reserv, CTLFLAG_RD, 0, "Reservation Info");

static long vm_reserv_broken;
SYSCTL_LONG(_vm_reserv, OID_AUTO, broken, CTLFLAG_RD,
    &vm_reserv_broken, 0, "Cumulative number of broken reservations");

static long vm_reserv_freed;
SYSCTL_LONG(_vm_reserv, OID_AUTO, freed, CTLFLAG_RD,
    &vm_reserv_freed, 0, "Cumulative number of freed reservations");

static int sysctl_vm_reserv_fullpop(SYSCTL_HANDLER_ARGS);

SYSCTL_PROC(_vm_reserv, OID_AUTO, fullpop, CTLTYPE_INT | CTLFLAG_RD, NULL, 0,
    sysctl_vm_reserv_fullpop, "I", "Current number of full reservations");

static int sysctl_vm_reserv_partpopq(SYSCTL_HANDLER_ARGS);

SYSCTL_OID(_vm_reserv, OID_AUTO, partpopq, CTLTYPE_STRING | CTLFLAG_RD, NULL, 0,
    sysctl_vm_reserv_partpopq, "A", "Partially-populated reservation queues");

static long vm_reserv_reclaimed;
SYSCTL_LONG(_vm_reserv, OID_AUTO, reclaimed, CTLFLAG_RD,
    &vm_reserv_reclaimed, 0, "Cumulative number of reclaimed reservations");

static void		vm_reserv_break(vm_reserv_t rv, vm_page_t m);
static void		vm_reserv_depopulate(vm_reserv_t rv, int index);
static vm_reserv_t	vm_reserv_from_page(vm_page_t m);
static boolean_t	vm_reserv_has_pindex(vm_reserv_t rv,
			    vm_pindex_t pindex);
static void		vm_reserv_populate(vm_reserv_t rv, int index);
static void		vm_reserv_reclaim(vm_reserv_t rv);

/*
 * Returns the current number of full reservations.
 *
 * Since the number of full reservations is computed without acquiring the
 * free page queue lock, the returned value may be inexact.
 */
static int
sysctl_vm_reserv_fullpop(SYSCTL_HANDLER_ARGS)
{
	vm_paddr_t paddr;
	struct vm_phys_seg *seg;
	vm_reserv_t rv;
	int fullpop, segind;

	fullpop = 0;
	for (segind = 0; segind < vm_phys_nsegs; segind++) {
		seg = &vm_phys_segs[segind];
		paddr = roundup2(seg->start, VM_LEVEL_0_SIZE);
		while (paddr + VM_LEVEL_0_SIZE <= seg->end) {
			rv = &vm_reserv_array[paddr >> VM_LEVEL_0_SHIFT];
			fullpop += rv->popcnt == VM_LEVEL_0_NPAGES;
			paddr += VM_LEVEL_0_SIZE;
		}
	}
	return (sysctl_handle_int(oidp, &fullpop, 0, req));
}

/*
 * Describes the current state of the partially-populated reservation queue.
 */
static int
sysctl_vm_reserv_partpopq(SYSCTL_HANDLER_ARGS)
{
	struct sbuf sbuf;
	vm_reserv_t rv;
	int counter, error, level, unused_pages;

	error = sysctl_wire_old_buffer(req, 0);
	if (error != 0)
		return (error);
	sbuf_new_for_sysctl(&sbuf, NULL, 128, req);
	sbuf_printf(&sbuf, "\nLEVEL     SIZE  NUMBER\n\n");
	for (level = -1; level <= VM_NRESERVLEVEL - 2; level++) {
		counter = 0;
		unused_pages = 0;
		mtx_lock(&vm_page_queue_free_mtx);
		TAILQ_FOREACH(rv, &vm_rvq_partpop/*[level]*/, partpopq) {
			counter++;
			unused_pages += VM_LEVEL_0_NPAGES - rv->popcnt;
		}
		mtx_unlock(&vm_page_queue_free_mtx);
		sbuf_printf(&sbuf, "%5d: %6dK, %6d\n", level,
		    unused_pages * ((int)PAGE_SIZE / 1024), counter);
	}
	error = sbuf_finish(&sbuf);
	sbuf_delete(&sbuf);
	return (error);
}

/*
 * Reduces the given reservation's population count.  If the population count
 * becomes zero, the reservation is destroyed.  Additionally, moves the
 * reservation to the tail of the partially-populated reservation queue if the
 * population count is non-zero.
 *
 * The free page queue lock must be held.
 */
static void
vm_reserv_depopulate(vm_reserv_t rv, int index)
{

	mtx_assert(&vm_page_queue_free_mtx, MA_OWNED);
	KASSERT(rv->object != NULL,
	    ("vm_reserv_depopulate: reserv %p is free", rv));
	KASSERT(popmap_is_set(rv->popmap, index),
	    ("vm_reserv_depopulate: reserv %p's popmap[%d] is clear", rv,
	    index));
	KASSERT(rv->popcnt > 0,
	    ("vm_reserv_depopulate: reserv %p's popcnt is corrupted", rv));
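	/*
	 * An active reservation that is not on the partially-populated queue
	 * is fully populated, so its first page is marked as a superpage
	 * (psind == 1); clear that marking now that one of its pages is
	 * being freed.
	 */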
	if (rv->inpartpopq) {
		TAILQ_REMOVE(&vm_rvq_partpop, rv, partpopq);
		rv->inpartpopq = FALSE;
	} else {
		KASSERT(rv->pages->psind == 1,
		    ("vm_reserv_depopulate: reserv %p is already demoted",
		    rv));
		rv->pages->psind = 0;
	}
	popmap_clear(rv->popmap, index);
	rv->popcnt--;
	if (rv->popcnt == 0) {
		LIST_REMOVE(rv, objq);
		rv->object = NULL;
		vm_phys_free_pages(rv->pages, VM_LEVEL_0_ORDER);
		vm_reserv_freed++;
	} else {
		rv->inpartpopq = TRUE;
		TAILQ_INSERT_TAIL(&vm_rvq_partpop, rv, partpopq);
	}
}

/*
 * Returns the reservation to which the given page might belong.
 */
static __inline vm_reserv_t
vm_reserv_from_page(vm_page_t m)
{

	return (&vm_reserv_array[VM_PAGE_TO_PHYS(m) >> VM_LEVEL_0_SHIFT]);
}

/*
 * Returns TRUE if the given reservation contains the given page index and
 * FALSE otherwise.
 */
static __inline boolean_t
vm_reserv_has_pindex(vm_reserv_t rv, vm_pindex_t pindex)
{

	return (((pindex - rv->pindex) & ~(VM_LEVEL_0_NPAGES - 1)) == 0);
}
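
/*
 * Note that the mask test in vm_reserv_has_pindex() is equivalent to checking
 * that "pindex" lies in [rv->pindex, rv->pindex + VM_LEVEL_0_NPAGES): the
 * unsigned difference has no bits set at or above VM_LEVEL_0_ORDER exactly
 * when it is less than VM_LEVEL_0_NPAGES, and an underflowing difference
 * wraps to a huge value that fails the test.
 */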

/*
 * Increases the given reservation's population count.  Moves the reservation
 * to the tail of the partially-populated reservation queue.
 *
 * The free page queue must be locked.
 */
static void
vm_reserv_populate(vm_reserv_t rv, int index)
{

	mtx_assert(&vm_page_queue_free_mtx, MA_OWNED);
	KASSERT(rv->object != NULL,
	    ("vm_reserv_populate: reserv %p is free", rv));
	KASSERT(popmap_is_clear(rv->popmap, index),
	    ("vm_reserv_populate: reserv %p's popmap[%d] is set", rv,
	    index));
	KASSERT(rv->popcnt < VM_LEVEL_0_NPAGES,
	    ("vm_reserv_populate: reserv %p is already full", rv));
	KASSERT(rv->pages->psind == 0,
	    ("vm_reserv_populate: reserv %p is already promoted", rv));
	if (rv->inpartpopq) {
		TAILQ_REMOVE(&vm_rvq_partpop, rv, partpopq);
		rv->inpartpopq = FALSE;
	}
	popmap_set(rv->popmap, index);
	rv->popcnt++;
	if (rv->popcnt < VM_LEVEL_0_NPAGES) {
		rv->inpartpopq = TRUE;
		TAILQ_INSERT_TAIL(&vm_rvq_partpop, rv, partpopq);
	} else
		rv->pages->psind = 1;
}

/*
 * Allocates a contiguous set of physical pages of the given size "npages"
 * from existing or newly created reservations.  All of the physical pages
 * must be at or above the given physical address "low" and below the given
 * physical address "high".  The given value "alignment" determines the
 * alignment of the first physical page in the set.  If the given value
 * "boundary" is non-zero, then the set of physical pages cannot cross any
 * physical address boundary that is a multiple of that value.  Both
 * "alignment" and "boundary" must be a power of two.
 *
 * The object and free page queue must be locked.
 */
vm_page_t
vm_reserv_alloc_contig(vm_object_t object, vm_pindex_t pindex, u_long npages,
    vm_paddr_t low, vm_paddr_t high, u_long alignment, vm_paddr_t boundary)
{
	vm_paddr_t pa, size;
	vm_page_t m, m_ret, mpred, msucc;
	vm_pindex_t first, leftcap, rightcap;
	vm_reserv_t rv;
	u_long allocpages, maxpages, minpages;
	int i, index, n;

	mtx_assert(&vm_page_queue_free_mtx, MA_OWNED);
	VM_OBJECT_ASSERT_WLOCKED(object);
	KASSERT(npages != 0, ("vm_reserv_alloc_contig: npages is 0"));

	/*
	 * Is a reservation fundamentally impossible?
	 */
	if (pindex < VM_RESERV_INDEX(object, pindex) ||
	    pindex + npages > object->size)
		return (NULL);

	/*
	 * All reservations of a particular size have the same alignment.
	 * Assuming that the first page is allocated from a reservation, the
	 * least significant bits of its physical address can be determined
	 * from its offset from the beginning of the reservation and the size
	 * of the reservation.
	 *
	 * Could the specified index within a reservation of the smallest
	 * possible size satisfy the alignment and boundary requirements?
	 */
	pa = VM_RESERV_INDEX(object, pindex) << PAGE_SHIFT;
	if ((pa & (alignment - 1)) != 0)
		return (NULL);
	size = npages << PAGE_SHIFT;
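	/*
	 * The XOR test below detects a boundary crossing: the first and last
	 * byte of the run differ in some bit at or above log2(boundary) if
	 * and only if the run crosses an address that is a multiple of
	 * "boundary".
	 */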
	if (((pa ^ (pa + size - 1)) & ~(boundary - 1)) != 0)
		return (NULL);

	/*
	 * Look for an existing reservation.
	 */
	mpred = vm_radix_lookup_le(&object->rtree, pindex);
	if (mpred != NULL) {
		KASSERT(mpred->pindex < pindex,
		    ("vm_reserv_alloc_contig: pindex already allocated"));
		rv = vm_reserv_from_page(mpred);
		if (rv->object == object && vm_reserv_has_pindex(rv, pindex))
			goto found;
		msucc = TAILQ_NEXT(mpred, listq);
	} else
		msucc = TAILQ_FIRST(&object->memq);
	if (msucc != NULL) {
		KASSERT(msucc->pindex > pindex,
		    ("vm_reserv_alloc_contig: pindex already allocated"));
		rv = vm_reserv_from_page(msucc);
		if (rv->object == object && vm_reserv_has_pindex(rv, pindex))
			goto found;
	}

	/*
	 * Could at least one reservation fit between the first index to the
	 * left that can be used ("leftcap") and the first index to the right
	 * that cannot be used ("rightcap")?
	 */
	first = pindex - VM_RESERV_INDEX(object, pindex);
	if (mpred != NULL) {
		if ((rv = vm_reserv_from_page(mpred))->object != object)
			leftcap = mpred->pindex + 1;
		else
			leftcap = rv->pindex + VM_LEVEL_0_NPAGES;
		if (leftcap > first)
			return (NULL);
	}
	minpages = VM_RESERV_INDEX(object, pindex) + npages;
	maxpages = roundup2(minpages, VM_LEVEL_0_NPAGES);
	allocpages = maxpages;
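	/*
	 * For instance (assuming 512-page reservations), a request for 700
	 * pages starting at index 3 within its reservation yields a minpages
	 * of 703 and a maxpages of 1024, i.e., the request spans two
	 * reservations.
	 */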
	if (msucc != NULL) {
		if ((rv = vm_reserv_from_page(msucc))->object != object)
			rightcap = msucc->pindex;
		else
			rightcap = rv->pindex;
		if (first + maxpages > rightcap) {
			if (maxpages == VM_LEVEL_0_NPAGES)
				return (NULL);

			/*
			 * At least one reservation will fit between "leftcap"
			 * and "rightcap".  However, a reservation for the
			 * last of the requested pages will not fit.  Reduce
			 * the size of the upcoming allocation accordingly.
			 */
			allocpages = minpages;
		}
	}

	/*
	 * Would the last new reservation extend past the end of the object?
	 */
	if (first + maxpages > object->size) {
		/*
		 * Don't allocate the last new reservation if the object is a
		 * vnode or backed by another object that is a vnode.
		 */
		if (object->type == OBJT_VNODE ||
		    (object->backing_object != NULL &&
		    object->backing_object->type == OBJT_VNODE)) {
			if (maxpages == VM_LEVEL_0_NPAGES)
				return (NULL);
			allocpages = minpages;
		}
		/* Speculate that the object may grow. */
	}

	/*
	 * Allocate the physical pages.  The alignment and boundary specified
	 * for this allocation may be different from the alignment and
	 * boundary specified for the requested pages.  For instance, the
	 * specified index may not be the first page within the first new
	 * reservation.
	 */
	m = vm_phys_alloc_contig(allocpages, low, high, ulmax(alignment,
	    VM_LEVEL_0_SIZE), boundary > VM_LEVEL_0_SIZE ? boundary : 0);
	if (m == NULL)
		return (NULL);

	/*
	 * The allocated physical pages always begin at a reservation
	 * boundary, but they do not always end at a reservation boundary.
	 * Initialize every reservation that is completely covered by the
	 * allocated physical pages.
	 */
	m_ret = NULL;
	index = VM_RESERV_INDEX(object, pindex);
	do {
		rv = vm_reserv_from_page(m);
		KASSERT(rv->pages == m,
		    ("vm_reserv_alloc_contig: reserv %p's pages is corrupted",
		    rv));
		KASSERT(rv->object == NULL,
		    ("vm_reserv_alloc_contig: reserv %p isn't free", rv));
		LIST_INSERT_HEAD(&object->rvq, rv, objq);
		rv->object = object;
		rv->pindex = first;
		KASSERT(rv->popcnt == 0,
		    ("vm_reserv_alloc_contig: reserv %p's popcnt is corrupted",
		    rv));
		KASSERT(!rv->inpartpopq,
		    ("vm_reserv_alloc_contig: reserv %p's inpartpopq is TRUE",
		    rv));
		for (i = 0; i < NPOPMAP; i++)
			KASSERT(rv->popmap[i] == 0,
		    ("vm_reserv_alloc_contig: reserv %p's popmap is corrupted",
			    rv));
		n = ulmin(VM_LEVEL_0_NPAGES - index, npages);
		for (i = 0; i < n; i++)
			vm_reserv_populate(rv, index + i);
		npages -= n;
		if (m_ret == NULL) {
			m_ret = &rv->pages[index];
			index = 0;
		}
		m += VM_LEVEL_0_NPAGES;
		first += VM_LEVEL_0_NPAGES;
		allocpages -= VM_LEVEL_0_NPAGES;
	} while (allocpages >= VM_LEVEL_0_NPAGES);
	return (m_ret);

	/*
	 * Found a matching reservation.
	 */
found:
	index = VM_RESERV_INDEX(object, pindex);
	/* Does the allocation fit within the reservation? */
	if (index + npages > VM_LEVEL_0_NPAGES)
		return (NULL);
	m = &rv->pages[index];
	pa = VM_PAGE_TO_PHYS(m);
	if (pa < low || pa + size > high || (pa & (alignment - 1)) != 0 ||
	    ((pa ^ (pa + size - 1)) & ~(boundary - 1)) != 0)
		return (NULL);
	/* Handle vm_page_rename(m, new_object, ...). */
	for (i = 0; i < npages; i++)
		if (popmap_is_set(rv->popmap, index + i))
			return (NULL);
	for (i = 0; i < npages; i++)
		vm_reserv_populate(rv, index + i);
	return (m);
}

/*
 * Allocates a page from an existing or newly-created reservation.
 *
 * The page "mpred" must immediately precede the offset "pindex" within the
 * specified object.
 *
 * The object and free page queue must be locked.
 */
vm_page_t
vm_reserv_alloc_page(vm_object_t object, vm_pindex_t pindex, vm_page_t mpred)
{
	vm_page_t m, msucc;
	vm_pindex_t first, leftcap, rightcap;
	vm_reserv_t rv;
	int i, index;

	mtx_assert(&vm_page_queue_free_mtx, MA_OWNED);
	VM_OBJECT_ASSERT_WLOCKED(object);

	/*
	 * Is a reservation fundamentally impossible?
	 */
	if (pindex < VM_RESERV_INDEX(object, pindex) ||
	    pindex >= object->size)
		return (NULL);

	/*
	 * Look for an existing reservation.
	 */
	if (mpred != NULL) {
		KASSERT(mpred->object == object,
		    ("vm_reserv_alloc_page: object doesn't contain mpred"));
		KASSERT(mpred->pindex < pindex,
		    ("vm_reserv_alloc_page: mpred doesn't precede pindex"));
		rv = vm_reserv_from_page(mpred);
		if (rv->object == object && vm_reserv_has_pindex(rv, pindex))
			goto found;
		msucc = TAILQ_NEXT(mpred, listq);
	} else
		msucc = TAILQ_FIRST(&object->memq);
	if (msucc != NULL) {
		KASSERT(msucc->pindex > pindex,
		    ("vm_reserv_alloc_page: msucc doesn't succeed pindex"));
		rv = vm_reserv_from_page(msucc);
		if (rv->object == object && vm_reserv_has_pindex(rv, pindex))
			goto found;
	}

	/*
	 * Could a reservation fit between the first index to the left that
	 * can be used and the first index to the right that cannot be used?
	 */
	first = pindex - VM_RESERV_INDEX(object, pindex);
	if (mpred != NULL) {
		if ((rv = vm_reserv_from_page(mpred))->object != object)
			leftcap = mpred->pindex + 1;
		else
			leftcap = rv->pindex + VM_LEVEL_0_NPAGES;
		if (leftcap > first)
			return (NULL);
	}
	if (msucc != NULL) {
		if ((rv = vm_reserv_from_page(msucc))->object != object)
			rightcap = msucc->pindex;
		else
			rightcap = rv->pindex;
		if (first + VM_LEVEL_0_NPAGES > rightcap)
			return (NULL);
	}

	/*
	 * Would a new reservation extend past the end of the object?
	 */
	if (first + VM_LEVEL_0_NPAGES > object->size) {
		/*
		 * Don't allocate a new reservation if the object is a vnode or
		 * backed by another object that is a vnode.
		 */
		if (object->type == OBJT_VNODE ||
		    (object->backing_object != NULL &&
		    object->backing_object->type == OBJT_VNODE))
			return (NULL);
		/* Speculate that the object may grow. */
	}

	/*
	 * Allocate and populate the new reservation.
	 */
	m = vm_phys_alloc_pages(VM_FREEPOOL_DEFAULT, VM_LEVEL_0_ORDER);
	if (m == NULL)
		return (NULL);
	rv = vm_reserv_from_page(m);
	KASSERT(rv->pages == m,
	    ("vm_reserv_alloc_page: reserv %p's pages is corrupted", rv));
	KASSERT(rv->object == NULL,
	    ("vm_reserv_alloc_page: reserv %p isn't free", rv));
	LIST_INSERT_HEAD(&object->rvq, rv, objq);
	rv->object = object;
	rv->pindex = first;
	KASSERT(rv->popcnt == 0,
	    ("vm_reserv_alloc_page: reserv %p's popcnt is corrupted", rv));
	KASSERT(!rv->inpartpopq,
	    ("vm_reserv_alloc_page: reserv %p's inpartpopq is TRUE", rv));
	for (i = 0; i < NPOPMAP; i++)
		KASSERT(rv->popmap[i] == 0,
		    ("vm_reserv_alloc_page: reserv %p's popmap is corrupted",
		    rv));
	index = VM_RESERV_INDEX(object, pindex);
	vm_reserv_populate(rv, index);
	return (&rv->pages[index]);

	/*
	 * Found a matching reservation.
	 */
found:
	index = VM_RESERV_INDEX(object, pindex);
	m = &rv->pages[index];
	/* Handle vm_page_rename(m, new_object, ...). */
	if (popmap_is_set(rv->popmap, index))
		return (NULL);
	vm_reserv_populate(rv, index);
	return (m);
}

/*
 * Breaks the given reservation.  Except for the specified cached or free
 * page, all cached and free pages in the reservation are returned to the
 * physical memory allocator.  The reservation's population count and map are
 * reset to their initial state.
 *
 * The given reservation must not be in the partially-populated reservation
 * queue.  The free page queue lock must be held.
 */
static void
vm_reserv_break(vm_reserv_t rv, vm_page_t m)
{
	int begin_zeroes, hi, i, lo;

	mtx_assert(&vm_page_queue_free_mtx, MA_OWNED);
	KASSERT(rv->object != NULL,
	    ("vm_reserv_break: reserv %p is free", rv));
	KASSERT(!rv->inpartpopq,
	    ("vm_reserv_break: reserv %p's inpartpopq is TRUE", rv));
	LIST_REMOVE(rv, objq);
	rv->object = NULL;
	if (m != NULL) {
		/*
		 * Since the reservation is being broken, there is no harm in
		 * abusing the population map to stop "m" from being returned
		 * to the physical memory allocator.
		 */
		i = m - rv->pages;
		KASSERT(popmap_is_clear(rv->popmap, i),
		    ("vm_reserv_break: reserv %p's popmap is corrupted", rv));
		popmap_set(rv->popmap, i);
		rv->popcnt++;
	}
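	/*
	 * Sweep the population map, treating each maximal run of clear bits
	 * as a contiguous run of unused pages: clear the map, deduct the run
	 * from "popcnt", and hand the run to vm_phys_free_contig().  "i" is
	 * the current popmap word, "hi" and "lo" are bit offsets within it,
	 * and "begin_zeroes" is the page index at which the current run of
	 * unused pages starts.
	 */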
	i = hi = 0;
	do {
		/* Find the next 0 bit.  Any previous 0 bits are < "hi". */
		lo = ffsl(~(((1UL << hi) - 1) | rv->popmap[i]));
		if (lo == 0) {
			/* Redundantly clears bits < "hi". */
			rv->popmap[i] = 0;
			rv->popcnt -= NBPOPMAP - hi;
			while (++i < NPOPMAP) {
				lo = ffsl(~rv->popmap[i]);
				if (lo == 0) {
					rv->popmap[i] = 0;
					rv->popcnt -= NBPOPMAP;
				} else
					break;
			}
			if (i == NPOPMAP)
				break;
			hi = 0;
		}
		KASSERT(lo > 0, ("vm_reserv_break: lo is %d", lo));
		/* Convert from ffsl() to ordinary bit numbering. */
		lo--;
		if (lo > 0) {
			/* Redundantly clears bits < "hi". */
			rv->popmap[i] &= ~((1UL << lo) - 1);
			rv->popcnt -= lo - hi;
		}
		begin_zeroes = NBPOPMAP * i + lo;
		/* Find the next 1 bit. */
		do
			hi = ffsl(rv->popmap[i]);
		while (hi == 0 && ++i < NPOPMAP);
		if (i != NPOPMAP)
			/* Convert from ffsl() to ordinary bit numbering. */
			hi--;
		vm_phys_free_contig(&rv->pages[begin_zeroes], NBPOPMAP * i +
		    hi - begin_zeroes);
	} while (i < NPOPMAP);
	KASSERT(rv->popcnt == 0,
	    ("vm_reserv_break: reserv %p's popcnt is corrupted", rv));
	vm_reserv_broken++;
}

/*
 * Breaks all reservations belonging to the given object.
 */
void
vm_reserv_break_all(vm_object_t object)
{
	vm_reserv_t rv;

	mtx_lock(&vm_page_queue_free_mtx);
	while ((rv = LIST_FIRST(&object->rvq)) != NULL) {
		KASSERT(rv->object == object,
		    ("vm_reserv_break_all: reserv %p is corrupted", rv));
		if (rv->inpartpopq) {
			TAILQ_REMOVE(&vm_rvq_partpop, rv, partpopq);
			rv->inpartpopq = FALSE;
		}
		vm_reserv_break(rv, NULL);
	}
	mtx_unlock(&vm_page_queue_free_mtx);
}

/*
 * Frees the given page if it belongs to a reservation.  Returns TRUE if the
 * page is freed and FALSE otherwise.
 *
 * The free page queue lock must be held.
 */
boolean_t
vm_reserv_free_page(vm_page_t m)
{
	vm_reserv_t rv;

	mtx_assert(&vm_page_queue_free_mtx, MA_OWNED);
	rv = vm_reserv_from_page(m);
	if (rv->object == NULL)
		return (FALSE);
	vm_reserv_depopulate(rv, m - rv->pages);
	return (TRUE);
}

/*
 * Initializes the reservation management system.  Specifically, initializes
 * the reservation array.
 *
 * Requires that vm_page_array and first_page are initialized!
 */
void
vm_reserv_init(void)
{
	vm_paddr_t paddr;
	struct vm_phys_seg *seg;
	int segind;

	/*
	 * Initialize the reservation array.  Specifically, initialize the
	 * "pages" field for every element that has an underlying superpage.
	 */
	for (segind = 0; segind < vm_phys_nsegs; segind++) {
		seg = &vm_phys_segs[segind];
		paddr = roundup2(seg->start, VM_LEVEL_0_SIZE);
		while (paddr + VM_LEVEL_0_SIZE <= seg->end) {
			vm_reserv_array[paddr >> VM_LEVEL_0_SHIFT].pages =
			    PHYS_TO_VM_PAGE(paddr);
			paddr += VM_LEVEL_0_SIZE;
		}
	}
}

/*
 * Returns true if the given page belongs to a reservation and that page is
 * free.  Otherwise, returns false.
 */
bool
vm_reserv_is_page_free(vm_page_t m)
{
	vm_reserv_t rv;

	mtx_assert(&vm_page_queue_free_mtx, MA_OWNED);
	rv = vm_reserv_from_page(m);
	if (rv->object == NULL)
		return (false);
	return (popmap_is_clear(rv->popmap, m - rv->pages));
}

/*
 * If the given page belongs to a reservation, returns the level of that
 * reservation.  Otherwise, returns -1.
 */
int
vm_reserv_level(vm_page_t m)
{
	vm_reserv_t rv;

	rv = vm_reserv_from_page(m);
	return (rv->object != NULL ? 0 : -1);
}

/*
 * Returns a reservation level if the given page belongs to a fully-populated
 * reservation and -1 otherwise.
 */
int
vm_reserv_level_iffullpop(vm_page_t m)
{
	vm_reserv_t rv;

	rv = vm_reserv_from_page(m);
	return (rv->popcnt == VM_LEVEL_0_NPAGES ? 0 : -1);
}

/*
 * Prepare for the reactivation of a cached page.
 *
 * First, suppose that the given page "m" was allocated individually, i.e., not
 * as part of a reservation, and cached.  Then, suppose a reservation
 * containing "m" is allocated by the same object.  Although "m" and the
 * reservation belong to the same object, "m"'s pindex may not match the
 * reservation's.
 *
 * The free page queue must be locked.
 */
boolean_t
vm_reserv_reactivate_page(vm_page_t m)
{
	vm_reserv_t rv;
	int index;

	mtx_assert(&vm_page_queue_free_mtx, MA_OWNED);
	rv = vm_reserv_from_page(m);
	if (rv->object == NULL)
		return (FALSE);
	KASSERT((m->flags & PG_CACHED) != 0,
	    ("vm_reserv_reactivate_page: page %p is not cached", m));
	if (m->object == rv->object &&
	    m->pindex - rv->pindex == (index = VM_RESERV_INDEX(m->object,
	    m->pindex)))
		vm_reserv_populate(rv, index);
	else {
		KASSERT(rv->inpartpopq,
	    ("vm_reserv_reactivate_page: reserv %p's inpartpopq is FALSE",
		    rv));
		TAILQ_REMOVE(&vm_rvq_partpop, rv, partpopq);
		rv->inpartpopq = FALSE;
		/* Don't release "m" to the physical memory allocator. */
		vm_reserv_break(rv, m);
	}
	return (TRUE);
}

/*
 * Breaks the given partially-populated reservation, releasing its cached and
 * free pages to the physical memory allocator.
 *
 * The free page queue lock must be held.
 */
static void
vm_reserv_reclaim(vm_reserv_t rv)
{

	mtx_assert(&vm_page_queue_free_mtx, MA_OWNED);
	KASSERT(rv->inpartpopq,
	    ("vm_reserv_reclaim: reserv %p's inpartpopq is FALSE", rv));
	TAILQ_REMOVE(&vm_rvq_partpop, rv, partpopq);
	rv->inpartpopq = FALSE;
	vm_reserv_break(rv, NULL);
	vm_reserv_reclaimed++;
}

/*
 * Breaks the reservation at the head of the partially-populated reservation
 * queue, releasing its cached and free pages to the physical memory
 * allocator.  Returns TRUE if a reservation is broken and FALSE otherwise.
 *
 * The free page queue lock must be held.
 */
boolean_t
vm_reserv_reclaim_inactive(void)
{
	vm_reserv_t rv;

	mtx_assert(&vm_page_queue_free_mtx, MA_OWNED);
	if ((rv = TAILQ_FIRST(&vm_rvq_partpop)) != NULL) {
		vm_reserv_reclaim(rv);
		return (TRUE);
	}
	return (FALSE);
}

/*
 * Searches the partially-populated reservation queue for the least recently
 * active reservation with unused pages, i.e., cached or free, that satisfy the
 * given request for contiguous physical memory.  If a satisfactory reservation
 * is found, it is broken.  Returns TRUE if a reservation is broken and FALSE
 * otherwise.
 *
 * The free page queue lock must be held.
 */
boolean_t
vm_reserv_reclaim_contig(u_long npages, vm_paddr_t low, vm_paddr_t high,
    u_long alignment, vm_paddr_t boundary)
{
	vm_paddr_t pa, size;
	vm_reserv_t rv;
	int hi, i, lo, low_index, next_free;

	mtx_assert(&vm_page_queue_free_mtx, MA_OWNED);
	if (npages > VM_LEVEL_0_NPAGES - 1)
		return (FALSE);
	size = npages << PAGE_SHIFT;
	TAILQ_FOREACH(rv, &vm_rvq_partpop, partpopq) {
		pa = VM_PAGE_TO_PHYS(&rv->pages[VM_LEVEL_0_NPAGES - 1]);
		if (pa + PAGE_SIZE - size < low) {
			/* This entire reservation is too low; go to next. */
			continue;
		}
		pa = VM_PAGE_TO_PHYS(&rv->pages[0]);
		if (pa + size > high) {
			/* This entire reservation is too high; go to next. */
			continue;
		}
		if (pa < low) {
			/* Start the search for free pages at "low". */
			low_index = (low + PAGE_MASK - pa) >> PAGE_SHIFT;
			i = low_index / NBPOPMAP;
			hi = low_index % NBPOPMAP;
		} else
			i = hi = 0;
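		/*
		 * Scan this reservation's population map for a run of at
		 * least "npages" consecutive clear bits (unused pages) that
		 * also satisfies the alignment and boundary constraints.
		 * "next_free" is the page index of the candidate run's first
		 * unused page; "hi" and "lo" are bit offsets within popmap
		 * word "i".
		 */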
		do {
			/* Find the next free page. */
			lo = ffsl(~(((1UL << hi) - 1) | rv->popmap[i]));
			while (lo == 0 && ++i < NPOPMAP)
				lo = ffsl(~rv->popmap[i]);
			if (i == NPOPMAP)
				break;
			/* Convert from ffsl() to ordinary bit numbering. */
			lo--;
			next_free = NBPOPMAP * i + lo;
			pa = VM_PAGE_TO_PHYS(&rv->pages[next_free]);
			KASSERT(pa >= low,
			    ("vm_reserv_reclaim_contig: pa is too low"));
			if (pa + size > high) {
				/* The rest of this reservation is too high. */
				break;
			} else if ((pa & (alignment - 1)) != 0 ||
			    ((pa ^ (pa + size - 1)) & ~(boundary - 1)) != 0) {
				/*
				 * The current page doesn't meet the alignment
				 * and/or boundary requirements.  Continue
				 * searching this reservation until the rest
				 * of its free pages are either excluded or
				 * exhausted.
				 */
				hi = lo + 1;
				if (hi >= NBPOPMAP) {
					hi = 0;
					i++;
				}
				continue;
			}
			/* Find the next used page. */
			hi = ffsl(rv->popmap[i] & ~((1UL << lo) - 1));
			while (hi == 0 && ++i < NPOPMAP) {
				if ((NBPOPMAP * i - next_free) * PAGE_SIZE >=
				    size) {
					vm_reserv_reclaim(rv);
					return (TRUE);
				}
				hi = ffsl(rv->popmap[i]);
			}
			/* Convert from ffsl() to ordinary bit numbering. */
			if (i != NPOPMAP)
				hi--;
			if ((NBPOPMAP * i + hi - next_free) * PAGE_SIZE >=
			    size) {
				vm_reserv_reclaim(rv);
				return (TRUE);
			}
		} while (i < NPOPMAP);
	}
	return (FALSE);
}

/*
 * Transfers the reservation underlying the given page to a new object.
 *
 * The object must be locked.
 */
void
vm_reserv_rename(vm_page_t m, vm_object_t new_object, vm_object_t old_object,
    vm_pindex_t old_object_offset)
{
	vm_reserv_t rv;

	VM_OBJECT_ASSERT_WLOCKED(new_object);
	rv = vm_reserv_from_page(m);
	if (rv->object == old_object) {
		mtx_lock(&vm_page_queue_free_mtx);
		if (rv->object == old_object) {
			LIST_REMOVE(rv, objq);
			LIST_INSERT_HEAD(&new_object->rvq, rv, objq);
			rv->object = new_object;
			rv->pindex -= old_object_offset;
		}
		mtx_unlock(&vm_page_queue_free_mtx);
	}
}

/*
 * Returns the size (in bytes) of a reservation of the specified level.
 */
int
vm_reserv_size(int level)
{

	switch (level) {
	case 0:
		return (VM_LEVEL_0_SIZE);
	case -1:
		return (PAGE_SIZE);
	default:
		return (0);
	}
}

/*
 * Allocates the virtual and physical memory required by the reservation
 * management system's data structures, in particular, the reservation array.
 */
vm_paddr_t
vm_reserv_startup(vm_offset_t *vaddr, vm_paddr_t end, vm_paddr_t high_water)
{
	vm_paddr_t new_end;
	size_t size;

	/*
	 * Calculate the size (in bytes) of the reservation array.  Round up
	 * from "high_water" because every small page is mapped to an element
	 * in the reservation array based on its physical address.  Thus, the
	 * number of elements in the reservation array can be greater than the
	 * number of superpages.
	 */
	size = howmany(high_water, VM_LEVEL_0_SIZE) * sizeof(struct vm_reserv);
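
	/*
	 * For example, assuming 2MB reservations, a "high_water" of 16GB
	 * yields howmany(16GB, 2MB) == 8192 array elements.
	 */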

	/*
	 * Allocate and map the physical memory for the reservation array.  The
	 * next available virtual address is returned by reference.
	 */
	new_end = end - round_page(size);
	vm_reserv_array = (void *)(uintptr_t)pmap_map(vaddr, new_end, end,
	    VM_PROT_READ | VM_PROT_WRITE);
	bzero(vm_reserv_array, size);

	/*
	 * Return the next available physical address.
	 */
	return (new_end);
}

#endif	/* VM_NRESERVLEVEL > 0 */