vm_page.c revision 295222
1/*-
2 * Copyright (c) 1991 Regents of the University of California.
3 * All rights reserved.
4 * Copyright (c) 1998 Matthew Dillon.  All Rights Reserved.
5 *
6 * This code is derived from software contributed to Berkeley by
7 * The Mach Operating System project at Carnegie-Mellon University.
8 *
9 * Redistribution and use in source and binary forms, with or without
10 * modification, are permitted provided that the following conditions
11 * are met:
12 * 1. Redistributions of source code must retain the above copyright
13 *    notice, this list of conditions and the following disclaimer.
14 * 2. Redistributions in binary form must reproduce the above copyright
15 *    notice, this list of conditions and the following disclaimer in the
16 *    documentation and/or other materials provided with the distribution.
17 * 4. Neither the name of the University nor the names of its contributors
18 *    may be used to endorse or promote products derived from this software
19 *    without specific prior written permission.
20 *
21 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
22 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
23 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
24 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
25 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
26 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
27 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
28 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
29 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
30 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
31 * SUCH DAMAGE.
32 *
33 *	from: @(#)vm_page.c	7.4 (Berkeley) 5/7/91
34 */
35
36/*-
37 * Copyright (c) 1987, 1990 Carnegie-Mellon University.
38 * All rights reserved.
39 *
40 * Authors: Avadis Tevanian, Jr., Michael Wayne Young
41 *
42 * Permission to use, copy, modify and distribute this software and
43 * its documentation is hereby granted, provided that both the copyright
44 * notice and this permission notice appear in all copies of the
45 * software, derivative works or modified versions, and any portions
46 * thereof, and that both notices appear in supporting documentation.
47 *
48 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
49 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND
50 * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
51 *
52 * Carnegie Mellon requests users of this software to return to
53 *
54 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
55 *  School of Computer Science
56 *  Carnegie Mellon University
57 *  Pittsburgh PA 15213-3890
58 *
59 * any improvements or extensions that they make and grant Carnegie the
60 * rights to redistribute these changes.
61 */
62
63/*
64 *			GENERAL RULES ON VM_PAGE MANIPULATION
65 *
66 *	- A page queue lock is required when adding or removing a page from a
67 *	  page queue regardless of other locks or the busy state of a page.
68 *
69 *		* In general, no thread besides the page daemon can acquire or
70 *		  hold more than one page queue lock at a time.
71 *
72 *		* The page daemon can acquire and hold any pair of page queue
73 *		  locks in any order.
74 *
75 *	- The object lock is required when inserting or removing
76 *	  pages from an object (vm_page_insert() or vm_page_remove()).
77 *
78 */
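
/*
 * A minimal usage sketch of the rules above (illustrative only; the
 * identifiers "obj", "pindex" and "m" are placeholders, and real callers
 * vary): the object lock covers insertion, while queue manipulation goes
 * through the page lock and queue interfaces.
 */
#if 0
	VM_OBJECT_WLOCK(obj);
	m = vm_page_alloc(obj, pindex, VM_ALLOC_NORMAL);	/* inserts m */
	if (m != NULL) {
		vm_page_lock(m);
		vm_page_activate(m);	/* internally takes the queue lock */
		vm_page_unlock(m);
		vm_page_xunbusy(m);
	}
	VM_OBJECT_WUNLOCK(obj);
#endif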
79
80/*
81 *	Resident memory management module.
82 */
83
84#include <sys/cdefs.h>
85__FBSDID("$FreeBSD: head/sys/vm/vm_page.c 295222 2016-02-03 23:30:17Z glebius $");
86
87#include "opt_vm.h"
88
89#include <sys/param.h>
90#include <sys/systm.h>
91#include <sys/lock.h>
92#include <sys/kernel.h>
93#include <sys/limits.h>
94#include <sys/linker.h>
95#include <sys/malloc.h>
96#include <sys/mman.h>
97#include <sys/msgbuf.h>
98#include <sys/mutex.h>
99#include <sys/proc.h>
100#include <sys/rwlock.h>
101#include <sys/sbuf.h>
102#include <sys/sysctl.h>
103#include <sys/vmmeter.h>
104#include <sys/vnode.h>
105#include <sys/taskqueue.h>
106
107#include <vm/vm.h>
108#include <vm/pmap.h>
109#include <vm/vm_param.h>
110#include <vm/vm_kern.h>
111#include <vm/vm_object.h>
112#include <vm/vm_page.h>
113#include <vm/vm_pageout.h>
114#include <vm/vm_pager.h>
115#include <vm/vm_phys.h>
116#include <vm/vm_radix.h>
117#include <vm/vm_reserv.h>
118#include <vm/vm_extern.h>
119#include <vm/uma.h>
120#include <vm/uma_int.h>
121
122#include <machine/md_var.h>
123
124/*
125 *	Associated with each page of user-allocatable memory is a
126 *	page structure.
127 */
128
129struct vm_domain vm_dom[MAXMEMDOM];
130struct mtx_padalign vm_page_queue_free_mtx;
131
132struct mtx_padalign pa_lock[PA_LOCK_COUNT];
133
134vm_page_t vm_page_array;
135long vm_page_array_size;
136long first_page;
137int vm_page_zero_count;
138
139static int boot_pages = UMA_BOOT_PAGES;
140SYSCTL_INT(_vm, OID_AUTO, boot_pages, CTLFLAG_RDTUN | CTLFLAG_NOFETCH,
141    &boot_pages, 0,
142    "number of pages allocated for bootstrapping the VM system");
143
144static int pa_tryrelock_restart;
145SYSCTL_INT(_vm, OID_AUTO, tryrelock_restart, CTLFLAG_RD,
146    &pa_tryrelock_restart, 0, "Number of tryrelock restarts");
147
148static TAILQ_HEAD(, vm_page) blacklist_head;
149static int sysctl_vm_page_blacklist(SYSCTL_HANDLER_ARGS);
150SYSCTL_PROC(_vm, OID_AUTO, page_blacklist, CTLTYPE_STRING | CTLFLAG_RD |
151    CTLFLAG_MPSAFE, NULL, 0, sysctl_vm_page_blacklist, "A", "Blacklist pages");
152
153/* Is the page daemon waiting for free pages? */
154static int vm_pageout_pages_needed;
155
156static uma_zone_t fakepg_zone;
157
158static struct vnode *vm_page_alloc_init(vm_page_t m);
159static void vm_page_cache_turn_free(vm_page_t m);
160static void vm_page_clear_dirty_mask(vm_page_t m, vm_page_bits_t pagebits);
161static void vm_page_enqueue(uint8_t queue, vm_page_t m);
162static void vm_page_free_wakeup(void);
163static void vm_page_init_fakepg(void *dummy);
164static int vm_page_insert_after(vm_page_t m, vm_object_t object,
165    vm_pindex_t pindex, vm_page_t mpred);
166static void vm_page_insert_radixdone(vm_page_t m, vm_object_t object,
167    vm_page_t mpred);
168static int vm_page_reclaim_run(int req_class, u_long npages, vm_page_t m_run,
169    vm_paddr_t high);
170
171SYSINIT(vm_page, SI_SUB_VM, SI_ORDER_SECOND, vm_page_init_fakepg, NULL);
172
173static void
174vm_page_init_fakepg(void *dummy)
175{
176
177	fakepg_zone = uma_zcreate("fakepg", sizeof(struct vm_page), NULL, NULL,
178	    NULL, NULL, UMA_ALIGN_PTR, UMA_ZONE_NOFREE | UMA_ZONE_VM);
179}
180
181/* Make sure that u_long is at least 64 bits when PAGE_SIZE is 32K. */
182#if PAGE_SIZE == 32768
183#ifdef CTASSERT
184CTASSERT(sizeof(u_long) >= 8);
185#endif
186#endif
187
188/*
189 * Try to acquire a physical address lock while a pmap is locked.  If we
190 * fail to trylock, we unlock the pmap, lock the physical address directly,
191 * relock the pmap, and cache the locked pa in *locked.  The caller should
192 * then restart its loop in case the virtual to physical mapping has changed.
193 */
194int
195vm_page_pa_tryrelock(pmap_t pmap, vm_paddr_t pa, vm_paddr_t *locked)
196{
197	vm_paddr_t lockpa;
198
199	lockpa = *locked;
200	*locked = pa;
201	if (lockpa) {
202		PA_LOCK_ASSERT(lockpa, MA_OWNED);
203		if (PA_LOCKPTR(pa) == PA_LOCKPTR(lockpa))
204			return (0);
205		PA_UNLOCK(lockpa);
206	}
207	if (PA_TRYLOCK(pa))
208		return (0);
209	PMAP_UNLOCK(pmap);
210	atomic_add_int(&pa_tryrelock_restart, 1);
211	PA_LOCK(pa);
212	PMAP_LOCK(pmap);
213	return (EAGAIN);
214}
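
/*
 * A minimal sketch of the retry loop described above (illustrative only;
 * "pte", "pa" and the PTE_TO_PHYS() helper are placeholders for however a
 * pmap derives the physical address): on EAGAIN the pmap lock was dropped,
 * so the translation must be re-read before retrying.
 */
#if 0
	vm_paddr_t locked_pa = 0;
retry:
	pa = PTE_TO_PHYS(*pte);
	if (vm_page_pa_tryrelock(pmap, pa, &locked_pa) == EAGAIN)
		goto retry;
	/* ... operate on the page at "pa" with its page lock held ... */
	PA_UNLOCK_COND(locked_pa);
#endif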
215
216/*
217 *	vm_set_page_size:
218 *
219 *	Sets the page size, perhaps based upon the memory
220 *	size.  Must be called before any use of page-size
221 *	dependent functions.
222 */
223void
224vm_set_page_size(void)
225{
226	if (vm_cnt.v_page_size == 0)
227		vm_cnt.v_page_size = PAGE_SIZE;
228	if (((vm_cnt.v_page_size - 1) & vm_cnt.v_page_size) != 0)
229		panic("vm_set_page_size: page size not a power of two");
230}
231
232/*
233 *	vm_page_blacklist_next:
234 *
235 *	Find the next entry in the provided string of blacklist
236 *	addresses.  Entries are separated by space, comma, or newline.
237 *	If an invalid integer is encountered then the rest of the
238 *	string is skipped.  Updates the list pointer to the next
239 *	character, or NULL if the string is exhausted or invalid.
240 */
241static vm_paddr_t
242vm_page_blacklist_next(char **list, char *end)
243{
244	vm_paddr_t bad;
245	char *cp, *pos;
246
247	if (list == NULL || *list == NULL)
248		return (0);
249	if (**list =='\0') {
250		*list = NULL;
251		return (0);
252	}
253
254	/*
255	 * If there's no end pointer then the buffer is coming from
256	 * the kenv and we know it's null-terminated.
257	 */
258	if (end == NULL)
259		end = *list + strlen(*list);
260
261	/* Ensure that strtoq() won't walk off the end */
262	if (*end != '\0') {
263		if (*end == '\n' || *end == ' ' || *end  == ',')
264			*end = '\0';
265		else {
266			printf("Blacklist not terminated, skipping\n");
267			*list = NULL;
268			return (0);
269		}
270	}
271
272	for (pos = *list; *pos != '\0'; pos = cp) {
273		bad = strtoq(pos, &cp, 0);
274		if (*cp == '\0' || *cp == ' ' || *cp == ',' || *cp == '\n') {
275			if (bad == 0) {
276				if (++cp < end)
277					continue;
278				else
279					break;
280			}
281		} else
282			break;
283		if (*cp == '\0' || ++cp >= end)
284			*list = NULL;
285		else
286			*list = cp;
287		return (trunc_page(bad));
288	}
289	printf("Garbage in RAM blacklist, skipping\n");
290	*list = NULL;
291	return (0);
292}
293
294/*
295 *	vm_page_blacklist_check:
296 *
297 *	Iterate through the provided string of blacklist addresses, pulling
298 *	each entry out of the physical allocator free list and putting it
299 *	onto a list for reporting via the vm.page_blacklist sysctl.
300 */
301static void
302vm_page_blacklist_check(char *list, char *end)
303{
304	vm_paddr_t pa;
305	vm_page_t m;
306	char *next;
307	int ret;
308
309	next = list;
310	while (next != NULL) {
311		if ((pa = vm_page_blacklist_next(&next, end)) == 0)
312			continue;
313		m = vm_phys_paddr_to_vm_page(pa);
314		if (m == NULL)
315			continue;
316		mtx_lock(&vm_page_queue_free_mtx);
317		ret = vm_phys_unfree_page(m);
318		mtx_unlock(&vm_page_queue_free_mtx);
319		if (ret == TRUE) {
320			TAILQ_INSERT_TAIL(&blacklist_head, m, listq);
321			if (bootverbose)
322				printf("Skipping page with pa 0x%jx\n",
323				    (uintmax_t)pa);
324		}
325	}
326}
327
328/*
329 *	vm_page_blacklist_load:
330 *
331 *	Search for a special module named "ram_blacklist".  It'll be a
332 *	plain text file provided by the user via the loader directive
333 *	of the same name.
334 */
335static void
336vm_page_blacklist_load(char **list, char **end)
337{
338	void *mod;
339	u_char *ptr;
340	u_int len;
341
342	mod = NULL;
343	ptr = NULL;
344
345	mod = preload_search_by_type("ram_blacklist");
346	if (mod != NULL) {
347		ptr = preload_fetch_addr(mod);
348		len = preload_fetch_size(mod);
349	}
350	*list = ptr;
351	if (ptr != NULL)
352		*end = ptr + len;
353	else
354		*end = NULL;
355	return;
356}
357
358static int
359sysctl_vm_page_blacklist(SYSCTL_HANDLER_ARGS)
360{
361	vm_page_t m;
362	struct sbuf sbuf;
363	int error, first;
364
365	first = 1;
366	error = sysctl_wire_old_buffer(req, 0);
367	if (error != 0)
368		return (error);
369	sbuf_new_for_sysctl(&sbuf, NULL, 128, req);
370	TAILQ_FOREACH(m, &blacklist_head, listq) {
371		sbuf_printf(&sbuf, "%s%#jx", first ? "" : ",",
372		    (uintmax_t)m->phys_addr);
373		first = 0;
374	}
375	error = sbuf_finish(&sbuf);
376	sbuf_delete(&sbuf);
377	return (error);
378}
379
380static void
381vm_page_domain_init(struct vm_domain *vmd)
382{
383	struct vm_pagequeue *pq;
384	int i;
385
386	*__DECONST(char **, &vmd->vmd_pagequeues[PQ_INACTIVE].pq_name) =
387	    "vm inactive pagequeue";
388	*__DECONST(int **, &vmd->vmd_pagequeues[PQ_INACTIVE].pq_vcnt) =
389	    &vm_cnt.v_inactive_count;
390	*__DECONST(char **, &vmd->vmd_pagequeues[PQ_ACTIVE].pq_name) =
391	    "vm active pagequeue";
392	*__DECONST(int **, &vmd->vmd_pagequeues[PQ_ACTIVE].pq_vcnt) =
393	    &vm_cnt.v_active_count;
394	vmd->vmd_page_count = 0;
395	vmd->vmd_free_count = 0;
396	vmd->vmd_segs = 0;
397	vmd->vmd_oom = FALSE;
398	vmd->vmd_pass = 0;
399	for (i = 0; i < PQ_COUNT; i++) {
400		pq = &vmd->vmd_pagequeues[i];
401		TAILQ_INIT(&pq->pq_pl);
402		mtx_init(&pq->pq_mutex, pq->pq_name, "vm pagequeue",
403		    MTX_DEF | MTX_DUPOK);
404	}
405}
406
407/*
408 *	vm_page_startup:
409 *
410 *	Initializes the resident memory module.
411 *
412 *	Allocates memory for the page cells, and
413 *	for the object/offset-to-page hash table headers.
414 *	Each page cell is initialized and placed on the free list.
415 */
416vm_offset_t
417vm_page_startup(vm_offset_t vaddr)
418{
419	vm_offset_t mapped;
420	vm_paddr_t page_range;
421	vm_paddr_t new_end;
422	int i;
423	vm_paddr_t pa;
424	vm_paddr_t last_pa;
425	char *list, *listend;
426	vm_paddr_t end;
427	vm_paddr_t biggestsize;
428	vm_paddr_t low_water, high_water;
429	int biggestone;
430
431	biggestsize = 0;
432	biggestone = 0;
433	vaddr = round_page(vaddr);
434
435	for (i = 0; phys_avail[i + 1]; i += 2) {
436		phys_avail[i] = round_page(phys_avail[i]);
437		phys_avail[i + 1] = trunc_page(phys_avail[i + 1]);
438	}
439
440	low_water = phys_avail[0];
441	high_water = phys_avail[1];
442
443	for (i = 0; i < vm_phys_nsegs; i++) {
444		if (vm_phys_segs[i].start < low_water)
445			low_water = vm_phys_segs[i].start;
446		if (vm_phys_segs[i].end > high_water)
447			high_water = vm_phys_segs[i].end;
448	}
449	for (i = 0; phys_avail[i + 1]; i += 2) {
450		vm_paddr_t size = phys_avail[i + 1] - phys_avail[i];
451
452		if (size > biggestsize) {
453			biggestone = i;
454			biggestsize = size;
455		}
456		if (phys_avail[i] < low_water)
457			low_water = phys_avail[i];
458		if (phys_avail[i + 1] > high_water)
459			high_water = phys_avail[i + 1];
460	}
461
462	end = phys_avail[biggestone+1];
463
464	/*
465	 * Initialize the page and queue locks.
466	 */
467	mtx_init(&vm_page_queue_free_mtx, "vm page free queue", NULL, MTX_DEF);
468	for (i = 0; i < PA_LOCK_COUNT; i++)
469		mtx_init(&pa_lock[i], "vm page", NULL, MTX_DEF);
470	for (i = 0; i < vm_ndomains; i++)
471		vm_page_domain_init(&vm_dom[i]);
472
473	/*
474	 * Allocate memory for use when boot strapping the kernel memory
475	 * allocator.
476	 *
477	 * CTLFLAG_RDTUN doesn't work during the early boot process, so we must
478	 * manually fetch the value.
479	 */
480	TUNABLE_INT_FETCH("vm.boot_pages", &boot_pages);
481	new_end = end - (boot_pages * UMA_SLAB_SIZE);
482	new_end = trunc_page(new_end);
483	mapped = pmap_map(&vaddr, new_end, end,
484	    VM_PROT_READ | VM_PROT_WRITE);
485	bzero((void *)mapped, end - new_end);
486	uma_startup((void *)mapped, boot_pages);
487
488#if defined(__aarch64__) || defined(__amd64__) || defined(__arm__) || \
489    defined(__i386__) || defined(__mips__)
490	/*
491	 * Allocate a bitmap to indicate that an arbitrary physical page
492	 * needs to be included in a minidump.
493	 *
494	 * The amd64 port needs this to indicate which direct map pages
495	 * need to be dumped, via calls to dump_add_page()/dump_drop_page().
496	 *
497	 * However, i386 still needs this workspace internally within the
498	 * minidump code.  In theory, they are not needed on i386, but are
499	 * included should the sf_buf code decide to use them.
500	 */
501	last_pa = 0;
502	for (i = 0; dump_avail[i + 1] != 0; i += 2)
503		if (dump_avail[i + 1] > last_pa)
504			last_pa = dump_avail[i + 1];
505	page_range = last_pa / PAGE_SIZE;
506	vm_page_dump_size = round_page(roundup2(page_range, NBBY) / NBBY);
507	new_end -= vm_page_dump_size;
508	vm_page_dump = (void *)(uintptr_t)pmap_map(&vaddr, new_end,
509	    new_end + vm_page_dump_size, VM_PROT_READ | VM_PROT_WRITE);
510	bzero((void *)vm_page_dump, vm_page_dump_size);
511#endif
512#ifdef __amd64__
513	/*
514	 * Request that the physical pages underlying the message buffer be
515	 * included in a crash dump.  Since the message buffer is accessed
516	 * through the direct map, they are not automatically included.
517	 */
518	pa = DMAP_TO_PHYS((vm_offset_t)msgbufp->msg_ptr);
519	last_pa = pa + round_page(msgbufsize);
520	while (pa < last_pa) {
521		dump_add_page(pa);
522		pa += PAGE_SIZE;
523	}
524#endif
525	/*
526	 * Compute the number of pages of memory that will be available for
527	 * use (taking into account the overhead of a page structure per
528	 * page).
529	 */
530	first_page = low_water / PAGE_SIZE;
531#ifdef VM_PHYSSEG_SPARSE
532	page_range = 0;
533	for (i = 0; i < vm_phys_nsegs; i++) {
534		page_range += atop(vm_phys_segs[i].end -
535		    vm_phys_segs[i].start);
536	}
537	for (i = 0; phys_avail[i + 1] != 0; i += 2)
538		page_range += atop(phys_avail[i + 1] - phys_avail[i]);
539#elif defined(VM_PHYSSEG_DENSE)
540	page_range = high_water / PAGE_SIZE - first_page;
541#else
542#error "Either VM_PHYSSEG_DENSE or VM_PHYSSEG_SPARSE must be defined."
543#endif
544	end = new_end;
545
546	/*
547	 * Reserve an unmapped guard page to trap access to vm_page_array[-1].
548	 */
549	vaddr += PAGE_SIZE;
550
551	/*
552	 * Initialize the mem entry structures now, and put them in the free
553	 * queue.
554	 */
555	new_end = trunc_page(end - page_range * sizeof(struct vm_page));
556	mapped = pmap_map(&vaddr, new_end, end,
557	    VM_PROT_READ | VM_PROT_WRITE);
558	vm_page_array = (vm_page_t) mapped;
559#if VM_NRESERVLEVEL > 0
560	/*
561	 * Allocate memory for the reservation management system's data
562	 * structures.
563	 */
564	new_end = vm_reserv_startup(&vaddr, new_end, high_water);
565#endif
566#if defined(__aarch64__) || defined(__amd64__) || defined(__mips__)
567	/*
568	 * pmap_map on arm64, amd64, and mips can come out of the direct-map,
569	 * not kvm like i386, so the pages must be tracked for a crashdump to
570	 * include this data.  This includes the vm_page_array and the early
571	 * UMA bootstrap pages.
572	 */
573	for (pa = new_end; pa < phys_avail[biggestone + 1]; pa += PAGE_SIZE)
574		dump_add_page(pa);
575#endif
576	phys_avail[biggestone + 1] = new_end;
577
578	/*
579	 * Add physical memory segments corresponding to the available
580	 * physical pages.
581	 */
582	for (i = 0; phys_avail[i + 1] != 0; i += 2)
583		vm_phys_add_seg(phys_avail[i], phys_avail[i + 1]);
584
585	/*
586	 * Clear all of the page structures
587	 */
588	bzero((caddr_t) vm_page_array, page_range * sizeof(struct vm_page));
589	for (i = 0; i < page_range; i++)
590		vm_page_array[i].order = VM_NFREEORDER;
591	vm_page_array_size = page_range;
592
593	/*
594	 * Initialize the physical memory allocator.
595	 */
596	vm_phys_init();
597
598	/*
599	 * Add every available physical page that is not blacklisted to
600	 * the free lists.
601	 */
602	vm_cnt.v_page_count = 0;
603	vm_cnt.v_free_count = 0;
604	for (i = 0; phys_avail[i + 1] != 0; i += 2) {
605		pa = phys_avail[i];
606		last_pa = phys_avail[i + 1];
607		while (pa < last_pa) {
608			vm_phys_add_page(pa);
609			pa += PAGE_SIZE;
610		}
611	}
612
613	TAILQ_INIT(&blacklist_head);
614	vm_page_blacklist_load(&list, &listend);
615	vm_page_blacklist_check(list, listend);
616
617	list = kern_getenv("vm.blacklist");
618	vm_page_blacklist_check(list, NULL);
619
620	freeenv(list);
621#if VM_NRESERVLEVEL > 0
622	/*
623	 * Initialize the reservation management system.
624	 */
625	vm_reserv_init();
626#endif
627	return (vaddr);
628}
629
630void
631vm_page_reference(vm_page_t m)
632{
633
634	vm_page_aflag_set(m, PGA_REFERENCED);
635}
636
637/*
638 *	vm_page_busy_downgrade:
639 *
640 *	Downgrade an exclusive busy page into a single shared busy page.
641 */
642void
643vm_page_busy_downgrade(vm_page_t m)
644{
645	u_int x;
646
647	vm_page_assert_xbusied(m);
648
649	for (;;) {
650		x = m->busy_lock;
651		x &= VPB_BIT_WAITERS;
652		if (atomic_cmpset_rel_int(&m->busy_lock,
653		    VPB_SINGLE_EXCLUSIVER | x, VPB_SHARERS_WORD(1) | x))
654			break;
655	}
656}
657
658/*
659 *	vm_page_sbusied:
660 *
661 *	Return a positive value if the page is shared busied, 0 otherwise.
662 */
663int
664vm_page_sbusied(vm_page_t m)
665{
666	u_int x;
667
668	x = m->busy_lock;
669	return ((x & VPB_BIT_SHARED) != 0 && x != VPB_UNBUSIED);
670}
671
672/*
673 *	vm_page_sunbusy:
674 *
675 *	Shared unbusy a page.
676 */
677void
678vm_page_sunbusy(vm_page_t m)
679{
680	u_int x;
681
682	vm_page_assert_sbusied(m);
683
684	for (;;) {
685		x = m->busy_lock;
686		if (VPB_SHARERS(x) > 1) {
687			if (atomic_cmpset_int(&m->busy_lock, x,
688			    x - VPB_ONE_SHARER))
689				break;
690			continue;
691		}
692		if ((x & VPB_BIT_WAITERS) == 0) {
693			KASSERT(x == VPB_SHARERS_WORD(1),
694			    ("vm_page_sunbusy: invalid lock state"));
695			if (atomic_cmpset_int(&m->busy_lock,
696			    VPB_SHARERS_WORD(1), VPB_UNBUSIED))
697				break;
698			continue;
699		}
700		KASSERT(x == (VPB_SHARERS_WORD(1) | VPB_BIT_WAITERS),
701		    ("vm_page_sunbusy: invalid lock state for waiters"));
702
703		vm_page_lock(m);
704		if (!atomic_cmpset_int(&m->busy_lock, x, VPB_UNBUSIED)) {
705			vm_page_unlock(m);
706			continue;
707		}
708		wakeup(m);
709		vm_page_unlock(m);
710		break;
711	}
712}
713
714/*
715 *	vm_page_busy_sleep:
716 *
717 *	Sleep and release the page lock, using the page pointer as wchan.
718 *	This is used to implement the hard-path of busying mechanism.
719 *
720 *	The given page must be locked.
721 */
722void
723vm_page_busy_sleep(vm_page_t m, const char *wmesg)
724{
725	u_int x;
726
727	vm_page_lock_assert(m, MA_OWNED);
728
729	x = m->busy_lock;
730	if (x == VPB_UNBUSIED) {
731		vm_page_unlock(m);
732		return;
733	}
734	if ((x & VPB_BIT_WAITERS) == 0 &&
735	    !atomic_cmpset_int(&m->busy_lock, x, x | VPB_BIT_WAITERS)) {
736		vm_page_unlock(m);
737		return;
738	}
739	msleep(m, vm_page_lockptr(m), PVM | PDROP, wmesg, 0);
740}
741
742/*
743 *	vm_page_trysbusy:
744 *
745 *	Try to shared busy a page.
746 *	If the operation succeeds, 1 is returned; otherwise, 0 is returned.
747 *	The operation never sleeps.
748 */
749int
750vm_page_trysbusy(vm_page_t m)
751{
752	u_int x;
753
754	for (;;) {
755		x = m->busy_lock;
756		if ((x & VPB_BIT_SHARED) == 0)
757			return (0);
758		if (atomic_cmpset_acq_int(&m->busy_lock, x, x + VPB_ONE_SHARER))
759			return (1);
760	}
761}
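
/*
 * A minimal sketch of how a reader might use the shared busy state
 * (illustrative only; "obj" and "pindex" are placeholders): shared busy
 * keeps the page's identity stable and excludes exclusive busiers without
 * blocking other readers.
 */
#if 0
	VM_OBJECT_WLOCK(obj);
	m = vm_page_lookup(obj, pindex);
	if (m != NULL && vm_page_trysbusy(m)) {
		/* ... examine the page ... */
		vm_page_sunbusy(m);
	}
	VM_OBJECT_WUNLOCK(obj);
#endif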
762
763/*
764 *	vm_page_xunbusy_hard:
765 *
766 *	Called after the first attempt to exclusively unbusy a page failed.
767 *	It is assumed that the waiters bit is on.
768 */
769void
770vm_page_xunbusy_hard(vm_page_t m)
771{
772
773	vm_page_assert_xbusied(m);
774
775	vm_page_lock(m);
776	atomic_store_rel_int(&m->busy_lock, VPB_UNBUSIED);
777	wakeup(m);
778	vm_page_unlock(m);
779}
780
781/*
782 *	vm_page_flash:
783 *
784 *	Wakeup anyone waiting for the page.
785 *	The ownership bits do not change.
786 *
787 *	The given page must be locked.
788 */
789void
790vm_page_flash(vm_page_t m)
791{
792	u_int x;
793
794	vm_page_lock_assert(m, MA_OWNED);
795
796	for (;;) {
797		x = m->busy_lock;
798		if ((x & VPB_BIT_WAITERS) == 0)
799			return;
800		if (atomic_cmpset_int(&m->busy_lock, x,
801		    x & (~VPB_BIT_WAITERS)))
802			break;
803	}
804	wakeup(m);
805}
806
807/*
808 * Keep the page from being freed by the page daemon.  This has
809 * much the same effect as wiring, except with much lower overhead,
810 * and should be used only for *very* temporary
811 * holding ("wiring").
812 */
813void
814vm_page_hold(vm_page_t mem)
815{
816
817	vm_page_lock_assert(mem, MA_OWNED);
818	mem->hold_count++;
819}
820
821void
822vm_page_unhold(vm_page_t mem)
823{
824
825	vm_page_lock_assert(mem, MA_OWNED);
826	KASSERT(mem->hold_count >= 1, ("vm_page_unhold: hold count < 0!!!"));
827	--mem->hold_count;
828	if (mem->hold_count == 0 && (mem->flags & PG_UNHOLDFREE) != 0)
829		vm_page_free_toq(mem);
830}
831
832/*
833 *	vm_page_unhold_pages:
834 *
835 *	Unhold each of the pages that is referenced by the given array.
836 */
837void
838vm_page_unhold_pages(vm_page_t *ma, int count)
839{
840	struct mtx *mtx, *new_mtx;
841
842	mtx = NULL;
843	for (; count != 0; count--) {
844		/*
845		 * Avoid releasing and reacquiring the same page lock.
846		 */
847		new_mtx = vm_page_lockptr(*ma);
848		if (mtx != new_mtx) {
849			if (mtx != NULL)
850				mtx_unlock(mtx);
851			mtx = new_mtx;
852			mtx_lock(mtx);
853		}
854		vm_page_unhold(*ma);
855		ma++;
856	}
857	if (mtx != NULL)
858		mtx_unlock(mtx);
859}
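
/*
 * A minimal sketch of short-term holding (illustrative only; "m" is a
 * placeholder): the hold count pins the page across a brief window
 * without the cost of wiring.
 */
#if 0
	vm_page_lock(m);
	vm_page_hold(m);
	vm_page_unlock(m);
	/* ... brief access to the page ... */
	vm_page_lock(m);
	vm_page_unhold(m);	/* may free the page if PG_UNHOLDFREE is set */
	vm_page_unlock(m);
#endif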
860
861vm_page_t
862PHYS_TO_VM_PAGE(vm_paddr_t pa)
863{
864	vm_page_t m;
865
866#ifdef VM_PHYSSEG_SPARSE
867	m = vm_phys_paddr_to_vm_page(pa);
868	if (m == NULL)
869		m = vm_phys_fictitious_to_vm_page(pa);
870	return (m);
871#elif defined(VM_PHYSSEG_DENSE)
872	long pi;
873
874	pi = atop(pa);
875	if (pi >= first_page && (pi - first_page) < vm_page_array_size) {
876		m = &vm_page_array[pi - first_page];
877		return (m);
878	}
879	return (vm_phys_fictitious_to_vm_page(pa));
880#else
881#error "Either VM_PHYSSEG_DENSE or VM_PHYSSEG_SPARSE must be defined."
882#endif
883}
884
885/*
886 *	vm_page_getfake:
887 *
888 *	Create a fictitious page with the specified physical address and
889 *	memory attribute.  The memory attribute is the only machine-
890 *	dependent aspect of a fictitious page that must be initialized.
891 */
892vm_page_t
893vm_page_getfake(vm_paddr_t paddr, vm_memattr_t memattr)
894{
895	vm_page_t m;
896
897	m = uma_zalloc(fakepg_zone, M_WAITOK | M_ZERO);
898	vm_page_initfake(m, paddr, memattr);
899	return (m);
900}
901
902void
903vm_page_initfake(vm_page_t m, vm_paddr_t paddr, vm_memattr_t memattr)
904{
905
906	if ((m->flags & PG_FICTITIOUS) != 0) {
907		/*
908		 * The page's memattr might have changed since the
909		 * previous initialization.  Update the pmap to the
910		 * new memattr.
911		 */
912		goto memattr;
913	}
914	m->phys_addr = paddr;
915	m->queue = PQ_NONE;
916	/* Fictitious pages don't use "segind". */
917	m->flags = PG_FICTITIOUS;
918	/* Fictitious pages don't use "order" or "pool". */
919	m->oflags = VPO_UNMANAGED;
920	m->busy_lock = VPB_SINGLE_EXCLUSIVER;
921	m->wire_count = 1;
922	pmap_page_init(m);
923memattr:
924	pmap_page_set_memattr(m, memattr);
925}
926
927/*
928 *	vm_page_putfake:
929 *
930 *	Release a fictitious page.
931 */
932void
933vm_page_putfake(vm_page_t m)
934{
935
936	KASSERT((m->oflags & VPO_UNMANAGED) != 0, ("managed %p", m));
937	KASSERT((m->flags & PG_FICTITIOUS) != 0,
938	    ("vm_page_putfake: bad page %p", m));
939	uma_zfree(fakepg_zone, m);
940}
941
942/*
943 *	vm_page_updatefake:
944 *
945 *	Update the given fictitious page to the specified physical address and
946 *	memory attribute.
947 */
948void
949vm_page_updatefake(vm_page_t m, vm_paddr_t paddr, vm_memattr_t memattr)
950{
951
952	KASSERT((m->flags & PG_FICTITIOUS) != 0,
953	    ("vm_page_updatefake: bad page %p", m));
954	m->phys_addr = paddr;
955	pmap_page_set_memattr(m, memattr);
956}
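
/*
 * A minimal sketch of fictitious page usage (illustrative only;
 * "dev_paddr" and the memory attribute are placeholders): a device
 * pager-style consumer wraps device memory in a fictitious page and
 * releases it when the mapping goes away.
 */
#if 0
	vm_page_t m;

	m = vm_page_getfake(dev_paddr, VM_MEMATTR_UNCACHEABLE);
	/* ... map or hand "m" to the fault handler ... */
	vm_page_putfake(m);
#endif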
957
958/*
959 *	vm_page_free:
960 *
961 *	Free a page.
962 */
963void
964vm_page_free(vm_page_t m)
965{
966
967	m->flags &= ~PG_ZERO;
968	vm_page_free_toq(m);
969}
970
971/*
972 *	vm_page_free_zero:
973 *
974 *	Free a page to the zeroed-pages queue
975 */
976void
977vm_page_free_zero(vm_page_t m)
978{
979
980	m->flags |= PG_ZERO;
981	vm_page_free_toq(m);
982}
983
984/*
985 * Unbusy and handle the page queueing for a page from the VOP_GETPAGES()
986 * array which was optionally read ahead or behind.
987 */
988void
989vm_page_readahead_finish(vm_page_t m)
990{
991
992	/* We shouldn't put invalid pages on queues. */
993	KASSERT(m->valid != 0, ("%s: %p is invalid", __func__, m));
994
995	/*
996	 * Since the page is not the one actually needed, whether it should
997	 * be activated or deactivated is not obvious.  Empirical results
998	 * have shown that deactivating the page is usually the best choice,
999	 * unless the page is wanted by another thread.
1000	 */
1001	vm_page_lock(m);
1002	if ((m->busy_lock & VPB_BIT_WAITERS) != 0)
1003		vm_page_activate(m);
1004	else
1005		vm_page_deactivate(m);
1006	vm_page_unlock(m);
1007	vm_page_xunbusy(m);
1008}
1009
1010/*
1011 *	vm_page_sleep_if_busy:
1012 *
1013 *	Sleep and release the page queues lock if the page is busied.
1014 *	Returns TRUE if the thread slept.
1015 *
1016 *	The given page must be unlocked and the object containing it must
1017 *	be locked.
1018 */
1019int
1020vm_page_sleep_if_busy(vm_page_t m, const char *msg)
1021{
1022	vm_object_t obj;
1023
1024	vm_page_lock_assert(m, MA_NOTOWNED);
1025	VM_OBJECT_ASSERT_WLOCKED(m->object);
1026
1027	if (vm_page_busied(m)) {
1028		/*
1029		 * The page-specific object must be cached because page
1030		 * identity can change during the sleep, which could cause
1031		 * a different object to be re-locked.
1032		 * It is assumed that a reference to the object is already
1033		 * held by the caller.
1034		 */
1035		obj = m->object;
1036		vm_page_lock(m);
1037		VM_OBJECT_WUNLOCK(obj);
1038		vm_page_busy_sleep(m, msg);
1039		VM_OBJECT_WLOCK(obj);
1040		return (TRUE);
1041	}
1042	return (FALSE);
1043}
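
/*
 * A minimal sketch of the usual lookup/sleep/retry pattern (illustrative
 * only; "obj" and "pindex" are placeholders): because the object lock is
 * dropped while sleeping, the lookup must be repeated afterwards.
 */
#if 0
	VM_OBJECT_WLOCK(obj);
retry:
	m = vm_page_lookup(obj, pindex);
	if (m != NULL && vm_page_sleep_if_busy(m, "pgbsy"))
		goto retry;
	/* ... "m" is either absent or not busied here ... */
	VM_OBJECT_WUNLOCK(obj);
#endif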
1044
1045/*
1046 *	vm_page_dirty_KBI:		[ internal use only ]
1047 *
1048 *	Set all bits in the page's dirty field.
1049 *
1050 *	The object containing the specified page must be locked if the
1051 *	call is made from the machine-independent layer.
1052 *
1053 *	See vm_page_clear_dirty_mask().
1054 *
1055 *	This function should only be called by vm_page_dirty().
1056 */
1057void
1058vm_page_dirty_KBI(vm_page_t m)
1059{
1060
1061	/* These assertions refer to this operation by its public name. */
1062	KASSERT((m->flags & PG_CACHED) == 0,
1063	    ("vm_page_dirty: page in cache!"));
1064	KASSERT(m->valid == VM_PAGE_BITS_ALL,
1065	    ("vm_page_dirty: page is invalid!"));
1066	m->dirty = VM_PAGE_BITS_ALL;
1067}
1068
1069/*
1070 *	vm_page_insert:		[ internal use only ]
1071 *
1072 *	Inserts the given mem entry into the object and object list.
1073 *
1074 *	The object must be locked.
1075 */
1076int
1077vm_page_insert(vm_page_t m, vm_object_t object, vm_pindex_t pindex)
1078{
1079	vm_page_t mpred;
1080
1081	VM_OBJECT_ASSERT_WLOCKED(object);
1082	mpred = vm_radix_lookup_le(&object->rtree, pindex);
1083	return (vm_page_insert_after(m, object, pindex, mpred));
1084}
1085
1086/*
1087 *	vm_page_insert_after:
1088 *
1089 *	Inserts the page "m" into the specified object at offset "pindex".
1090 *
1091 *	The page "mpred" must immediately precede the offset "pindex" within
1092 *	the specified object.
1093 *
1094 *	The object must be locked.
1095 */
1096static int
1097vm_page_insert_after(vm_page_t m, vm_object_t object, vm_pindex_t pindex,
1098    vm_page_t mpred)
1099{
1100	vm_pindex_t sidx;
1101	vm_object_t sobj;
1102	vm_page_t msucc;
1103
1104	VM_OBJECT_ASSERT_WLOCKED(object);
1105	KASSERT(m->object == NULL,
1106	    ("vm_page_insert_after: page already inserted"));
1107	if (mpred != NULL) {
1108		KASSERT(mpred->object == object,
1109		    ("vm_page_insert_after: object doesn't contain mpred"));
1110		KASSERT(mpred->pindex < pindex,
1111		    ("vm_page_insert_after: mpred doesn't precede pindex"));
1112		msucc = TAILQ_NEXT(mpred, listq);
1113	} else
1114		msucc = TAILQ_FIRST(&object->memq);
1115	if (msucc != NULL)
1116		KASSERT(msucc->pindex > pindex,
1117		    ("vm_page_insert_after: msucc doesn't succeed pindex"));
1118
1119	/*
1120	 * Record the object/offset pair in this page
1121	 */
1122	sobj = m->object;
1123	sidx = m->pindex;
1124	m->object = object;
1125	m->pindex = pindex;
1126
1127	/*
1128	 * Now link into the object's ordered list of backed pages.
1129	 */
1130	if (vm_radix_insert(&object->rtree, m)) {
1131		m->object = sobj;
1132		m->pindex = sidx;
1133		return (1);
1134	}
1135	vm_page_insert_radixdone(m, object, mpred);
1136	return (0);
1137}
1138
1139/*
1140 *	vm_page_insert_radixdone:
1141 *
1142 *	Complete page "m" insertion into the specified object after the
1143 *	radix trie hooking.
1144 *
1145 *	The page "mpred" must precede the offset "m->pindex" within the
1146 *	specified object.
1147 *
1148 *	The object must be locked.
1149 */
1150static void
1151vm_page_insert_radixdone(vm_page_t m, vm_object_t object, vm_page_t mpred)
1152{
1153
1154	VM_OBJECT_ASSERT_WLOCKED(object);
1155	KASSERT(object != NULL && m->object == object,
1156	    ("vm_page_insert_radixdone: page %p has inconsistent object", m));
1157	if (mpred != NULL) {
1158		KASSERT(mpred->object == object,
1159		    ("vm_page_insert_after: object doesn't contain mpred"));
1160		KASSERT(mpred->pindex < m->pindex,
1161		    ("vm_page_insert_after: mpred doesn't precede pindex"));
1162	}
1163
1164	if (mpred != NULL)
1165		TAILQ_INSERT_AFTER(&object->memq, mpred, m, listq);
1166	else
1167		TAILQ_INSERT_HEAD(&object->memq, m, listq);
1168
1169	/*
1170	 * Show that the object has one more resident page.
1171	 */
1172	object->resident_page_count++;
1173
1174	/*
1175	 * Hold the vnode until the last page is released.
1176	 */
1177	if (object->resident_page_count == 1 && object->type == OBJT_VNODE)
1178		vhold(object->handle);
1179
1180	/*
1181	 * Since we are inserting a new and possibly dirty page,
1182	 * update the object's OBJ_MIGHTBEDIRTY flag.
1183	 */
1184	if (pmap_page_is_write_mapped(m))
1185		vm_object_set_writeable_dirty(object);
1186}
1187
1188/*
1189 *	vm_page_remove:
1190 *
1191 *	Removes the given mem entry from the object/offset-page
1192 *	table and the object page list, but does not invalidate/terminate
1193 *	the backing store.
1194 *
1195 *	The object must be locked.  The page must be locked if it is managed.
1196 */
1197void
1198vm_page_remove(vm_page_t m)
1199{
1200	vm_object_t object;
1201	boolean_t lockacq;
1202
1203	if ((m->oflags & VPO_UNMANAGED) == 0)
1204		vm_page_lock_assert(m, MA_OWNED);
1205	if ((object = m->object) == NULL)
1206		return;
1207	VM_OBJECT_ASSERT_WLOCKED(object);
1208	if (vm_page_xbusied(m)) {
1209		lockacq = FALSE;
1210		if ((m->oflags & VPO_UNMANAGED) != 0 &&
1211		    !mtx_owned(vm_page_lockptr(m))) {
1212			lockacq = TRUE;
1213			vm_page_lock(m);
1214		}
1215		vm_page_flash(m);
1216		atomic_store_rel_int(&m->busy_lock, VPB_UNBUSIED);
1217		if (lockacq)
1218			vm_page_unlock(m);
1219	}
1220
1221	/*
1222	 * Now remove from the object's list of backed pages.
1223	 */
1224	vm_radix_remove(&object->rtree, m->pindex);
1225	TAILQ_REMOVE(&object->memq, m, listq);
1226
1227	/*
1228	 * And show that the object has one fewer resident page.
1229	 */
1230	object->resident_page_count--;
1231
1232	/*
1233	 * The vnode may now be recycled.
1234	 */
1235	if (object->resident_page_count == 0 && object->type == OBJT_VNODE)
1236		vdrop(object->handle);
1237
1238	m->object = NULL;
1239}
1240
1241/*
1242 *	vm_page_lookup:
1243 *
1244 *	Returns the page associated with the object/offset
1245 *	pair specified; if none is found, NULL is returned.
1246 *
1247 *	The object must be locked.
1248 */
1249vm_page_t
1250vm_page_lookup(vm_object_t object, vm_pindex_t pindex)
1251{
1252
1253	VM_OBJECT_ASSERT_LOCKED(object);
1254	return (vm_radix_lookup(&object->rtree, pindex));
1255}
1256
1257/*
1258 *	vm_page_find_least:
1259 *
1260 *	Returns the page associated with the object with least pindex
1261 *	greater than or equal to the parameter pindex, or NULL.
1262 *
1263 *	The object must be locked.
1264 */
1265vm_page_t
1266vm_page_find_least(vm_object_t object, vm_pindex_t pindex)
1267{
1268	vm_page_t m;
1269
1270	VM_OBJECT_ASSERT_LOCKED(object);
1271	if ((m = TAILQ_FIRST(&object->memq)) != NULL && m->pindex < pindex)
1272		m = vm_radix_lookup_ge(&object->rtree, pindex);
1273	return (m);
1274}
1275
1276/*
1277 * Returns the given page's successor (by pindex) within the object if it is
1278 * resident; if none is found, NULL is returned.
1279 *
1280 * The object must be locked.
1281 */
1282vm_page_t
1283vm_page_next(vm_page_t m)
1284{
1285	vm_page_t next;
1286
1287	VM_OBJECT_ASSERT_WLOCKED(m->object);
1288	if ((next = TAILQ_NEXT(m, listq)) != NULL &&
1289	    next->pindex != m->pindex + 1)
1290		next = NULL;
1291	return (next);
1292}
1293
1294/*
1295 * Returns the given page's predecessor (by pindex) within the object if it is
1296 * resident; if none is found, NULL is returned.
1297 *
1298 * The object must be locked.
1299 */
1300vm_page_t
1301vm_page_prev(vm_page_t m)
1302{
1303	vm_page_t prev;
1304
1305	VM_OBJECT_ASSERT_WLOCKED(m->object);
1306	if ((prev = TAILQ_PREV(m, pglist, listq)) != NULL &&
1307	    prev->pindex != m->pindex - 1)
1308		prev = NULL;
1309	return (prev);
1310}
1311
1312/*
1313 * Uses the page mnew as a replacement for an existing page at index
1314 * pindex which must be already present in the object.
1315 *
1316 * The existing page must not be on a paging queue.
1317 */
1318vm_page_t
1319vm_page_replace(vm_page_t mnew, vm_object_t object, vm_pindex_t pindex)
1320{
1321	vm_page_t mold;
1322
1323	VM_OBJECT_ASSERT_WLOCKED(object);
1324	KASSERT(mnew->object == NULL,
1325	    ("vm_page_replace: page already in object"));
1326
1327	/*
1328	 * This function mostly follows vm_page_insert() and
1329	 * vm_page_remove() without the radix, object count and vnode
1330	 * dance.  Double check such functions for more comments.
1331	 */
1332
1333	mnew->object = object;
1334	mnew->pindex = pindex;
1335	mold = vm_radix_replace(&object->rtree, mnew);
1336	KASSERT(mold->queue == PQ_NONE,
1337	    ("vm_page_replace: mold is on a paging queue"));
1338
1339	/* Keep the resident page list in sorted order. */
1340	TAILQ_INSERT_AFTER(&object->memq, mold, mnew, listq);
1341	TAILQ_REMOVE(&object->memq, mold, listq);
1342
1343	mold->object = NULL;
1344	vm_page_xunbusy(mold);
1345
1346	/*
1347	 * The object's resident_page_count does not change because we have
1348	 * swapped one page for another, but OBJ_MIGHTBEDIRTY may need to be set.
1349	 */
1350	if (pmap_page_is_write_mapped(mnew))
1351		vm_object_set_writeable_dirty(object);
1352	return (mold);
1353}
1354
1355/*
1356 *	vm_page_rename:
1357 *
1358 *	Move the given memory entry from its
1359 *	current object to the specified target object/offset.
1360 *
1361 *	Note: swap associated with the page must be invalidated by the move.  We
1362 *	      have to do this for several reasons:  (1) we aren't freeing the
1363 *	      page, (2) we are dirtying the page, (3) the VM system is probably
1364 *	      moving the page from object A to B, and will then later move
1365 *	      the backing store from A to B and we can't have a conflict.
1366 *
1367 *	Note: we *always* dirty the page.  It is necessary both for the
1368 *	      fact that we moved it, and because we may be invalidating
1369 *	      swap.  If the page is on the cache, we have to deactivate it
1370 *	      or vm_page_dirty() will panic.  Dirty pages are not allowed
1371 *	      on the cache.
1372 *
1373 *	The objects must be locked.
1374 */
1375int
1376vm_page_rename(vm_page_t m, vm_object_t new_object, vm_pindex_t new_pindex)
1377{
1378	vm_page_t mpred;
1379	vm_pindex_t opidx;
1380
1381	VM_OBJECT_ASSERT_WLOCKED(new_object);
1382
1383	mpred = vm_radix_lookup_le(&new_object->rtree, new_pindex);
1384	KASSERT(mpred == NULL || mpred->pindex != new_pindex,
1385	    ("vm_page_rename: pindex already renamed"));
1386
1387	/*
1388	 * Create a custom version of vm_page_insert() which does not depend
1389	 * on mpred and can cheat on the implementation aspects of the
1390	 * function.
1391	 */
1392	opidx = m->pindex;
1393	m->pindex = new_pindex;
1394	if (vm_radix_insert(&new_object->rtree, m)) {
1395		m->pindex = opidx;
1396		return (1);
1397	}
1398
1399	/*
1400	 * The operation cannot fail anymore.  The removal must happen before
1401	 * the listq iterator is tainted.
1402	 */
1403	m->pindex = opidx;
1404	vm_page_lock(m);
1405	vm_page_remove(m);
1406
1407	/* Return back to the new pindex to complete vm_page_insert(). */
1408	m->pindex = new_pindex;
1409	m->object = new_object;
1410	vm_page_unlock(m);
1411	vm_page_insert_radixdone(m, new_object, mpred);
1412	vm_page_dirty(m);
1413	return (0);
1414}
1415
1416/*
1417 *	Convert all of the given object's cached pages that have a
1418 *	pindex within the given range into free pages.  If the value
1419 *	zero is given for "end", then the range's upper bound is
1420 *	infinity.  If the given object is backed by a vnode and it
1421 *	transitions from having one or more cached pages to none, the
1422 *	vnode's hold count is reduced.
1423 */
1424void
1425vm_page_cache_free(vm_object_t object, vm_pindex_t start, vm_pindex_t end)
1426{
1427	vm_page_t m;
1428	boolean_t empty;
1429
1430	mtx_lock(&vm_page_queue_free_mtx);
1431	if (__predict_false(vm_radix_is_empty(&object->cache))) {
1432		mtx_unlock(&vm_page_queue_free_mtx);
1433		return;
1434	}
1435	while ((m = vm_radix_lookup_ge(&object->cache, start)) != NULL) {
1436		if (end != 0 && m->pindex >= end)
1437			break;
1438		vm_radix_remove(&object->cache, m->pindex);
1439		vm_page_cache_turn_free(m);
1440	}
1441	empty = vm_radix_is_empty(&object->cache);
1442	mtx_unlock(&vm_page_queue_free_mtx);
1443	if (object->type == OBJT_VNODE && empty)
1444		vdrop(object->handle);
1445}
1446
1447/*
1448 *	Returns the cached page that is associated with the given
1449 *	object and offset.  If, however, none exists, returns NULL.
1450 *
1451 *	The free page queue must be locked.
1452 */
1453static inline vm_page_t
1454vm_page_cache_lookup(vm_object_t object, vm_pindex_t pindex)
1455{
1456
1457	mtx_assert(&vm_page_queue_free_mtx, MA_OWNED);
1458	return (vm_radix_lookup(&object->cache, pindex));
1459}
1460
1461/*
1462 *	Remove the given cached page from its containing object's
1463 *	collection of cached pages.
1464 *
1465 *	The free page queue must be locked.
1466 */
1467static void
1468vm_page_cache_remove(vm_page_t m)
1469{
1470
1471	mtx_assert(&vm_page_queue_free_mtx, MA_OWNED);
1472	KASSERT((m->flags & PG_CACHED) != 0,
1473	    ("vm_page_cache_remove: page %p is not cached", m));
1474	vm_radix_remove(&m->object->cache, m->pindex);
1475	m->object = NULL;
1476	vm_cnt.v_cache_count--;
1477}
1478
1479/*
1480 *	Transfer all of the cached pages with offset greater than or
1481 *	equal to 'offidxstart' from the original object's cache to the
1482 *	new object's cache.  However, any cached pages with offset
1483 *	greater than or equal to the new object's size are kept in the
1484 *	original object.  Initially, the new object's cache must be
1485 *	empty.  Offset 'offidxstart' in the original object must
1486 *	correspond to offset zero in the new object.
1487 *
1488 *	The new object must be locked.
1489 */
1490void
1491vm_page_cache_transfer(vm_object_t orig_object, vm_pindex_t offidxstart,
1492    vm_object_t new_object)
1493{
1494	vm_page_t m;
1495
1496	/*
1497	 * Insertion into an object's collection of cached pages
1498	 * requires the object to be locked.  In contrast, removal does
1499	 * not.
1500	 */
1501	VM_OBJECT_ASSERT_WLOCKED(new_object);
1502	KASSERT(vm_radix_is_empty(&new_object->cache),
1503	    ("vm_page_cache_transfer: object %p has cached pages",
1504	    new_object));
1505	mtx_lock(&vm_page_queue_free_mtx);
1506	while ((m = vm_radix_lookup_ge(&orig_object->cache,
1507	    offidxstart)) != NULL) {
1508		/*
1509		 * Transfer all of the pages with offset greater than or
1510		 * equal to 'offidxstart' from the original object's
1511		 * cache to the new object's cache.
1512		 */
1513		if ((m->pindex - offidxstart) >= new_object->size)
1514			break;
1515		vm_radix_remove(&orig_object->cache, m->pindex);
1516		/* Update the page's object and offset. */
1517		m->object = new_object;
1518		m->pindex -= offidxstart;
1519		if (vm_radix_insert(&new_object->cache, m))
1520			vm_page_cache_turn_free(m);
1521	}
1522	mtx_unlock(&vm_page_queue_free_mtx);
1523}
1524
1525/*
1526 *	Returns TRUE if a cached page is associated with the given object and
1527 *	offset, and FALSE otherwise.
1528 *
1529 *	The object must be locked.
1530 */
1531boolean_t
1532vm_page_is_cached(vm_object_t object, vm_pindex_t pindex)
1533{
1534	vm_page_t m;
1535
1536	/*
1537	 * Insertion into an object's collection of cached pages requires the
1538	 * object to be locked.  Therefore, if the object is locked and the
1539	 * object's collection is empty, there is no need to acquire the free
1540	 * page queues lock in order to prove that the specified page doesn't
1541	 * exist.
1542	 */
1543	VM_OBJECT_ASSERT_WLOCKED(object);
1544	if (__predict_true(vm_object_cache_is_empty(object)))
1545		return (FALSE);
1546	mtx_lock(&vm_page_queue_free_mtx);
1547	m = vm_page_cache_lookup(object, pindex);
1548	mtx_unlock(&vm_page_queue_free_mtx);
1549	return (m != NULL);
1550}
1551
1552/*
1553 *	vm_page_alloc:
1554 *
1555 *	Allocate and return a page that is associated with the specified
1556 *	object and offset pair.  By default, this page is exclusive busied.
1557 *
1558 *	The caller must always specify an allocation class.
1559 *
1560 *	allocation classes:
1561 *	VM_ALLOC_NORMAL		normal process request
1562 *	VM_ALLOC_SYSTEM		system *really* needs a page
1563 *	VM_ALLOC_INTERRUPT	interrupt time request
1564 *
1565 *	optional allocation flags:
1566 *	VM_ALLOC_COUNT(number)	the number of additional pages that the caller
1567 *				intends to allocate
1568 *	VM_ALLOC_IFCACHED	return page only if it is cached
1569 *	VM_ALLOC_IFNOTCACHED	return NULL, do not reactivate if the page
1570 *				is cached
1571 *	VM_ALLOC_NOBUSY		do not exclusive busy the page
1572 *	VM_ALLOC_NODUMP		do not include the page in a kernel core dump
1573 *	VM_ALLOC_NOOBJ		page is not associated with an object and
1574 *				should not be exclusive busy
1575 *	VM_ALLOC_SBUSY		shared busy the allocated page
1576 *	VM_ALLOC_WIRED		wire the allocated page
1577 *	VM_ALLOC_ZERO		prefer a zeroed page
1578 *
1579 *	This routine may not sleep.
1580 */
1581vm_page_t
1582vm_page_alloc(vm_object_t object, vm_pindex_t pindex, int req)
1583{
1584	struct vnode *vp = NULL;
1585	vm_object_t m_object;
1586	vm_page_t m, mpred;
1587	int flags, req_class;
1588
1589	mpred = NULL;	/* XXX: pacify gcc */
1590	KASSERT((object != NULL) == ((req & VM_ALLOC_NOOBJ) == 0) &&
1591	    (object != NULL || (req & VM_ALLOC_SBUSY) == 0) &&
1592	    ((req & (VM_ALLOC_NOBUSY | VM_ALLOC_SBUSY)) !=
1593	    (VM_ALLOC_NOBUSY | VM_ALLOC_SBUSY)),
1594	    ("vm_page_alloc: inconsistent object(%p)/req(%x)", (void *)object,
1595	    req));
1596	if (object != NULL)
1597		VM_OBJECT_ASSERT_WLOCKED(object);
1598
1599	req_class = req & VM_ALLOC_CLASS_MASK;
1600
1601	/*
1602	 * The page daemon is allowed to dig deeper into the free page list.
1603	 */
1604	if (curproc == pageproc && req_class != VM_ALLOC_INTERRUPT)
1605		req_class = VM_ALLOC_SYSTEM;
1606
1607	if (object != NULL) {
1608		mpred = vm_radix_lookup_le(&object->rtree, pindex);
1609		KASSERT(mpred == NULL || mpred->pindex != pindex,
1610		   ("vm_page_alloc: pindex already allocated"));
1611	}
1612
1613	/*
1614	 * The page allocation request can come from consumers which already
1615	 * hold the free page queue mutex, like vm_page_insert() in
1616	 * vm_page_cache().
1617	 */
1618	mtx_lock_flags(&vm_page_queue_free_mtx, MTX_RECURSE);
1619	if (vm_cnt.v_free_count + vm_cnt.v_cache_count > vm_cnt.v_free_reserved ||
1620	    (req_class == VM_ALLOC_SYSTEM &&
1621	    vm_cnt.v_free_count + vm_cnt.v_cache_count > vm_cnt.v_interrupt_free_min) ||
1622	    (req_class == VM_ALLOC_INTERRUPT &&
1623	    vm_cnt.v_free_count + vm_cnt.v_cache_count > 0)) {
1624		/*
1625		 * Allocate from the free queue if the number of free pages
1626		 * exceeds the minimum for the request class.
1627		 */
1628		if (object != NULL &&
1629		    (m = vm_page_cache_lookup(object, pindex)) != NULL) {
1630			if ((req & VM_ALLOC_IFNOTCACHED) != 0) {
1631				mtx_unlock(&vm_page_queue_free_mtx);
1632				return (NULL);
1633			}
1634			if (vm_phys_unfree_page(m))
1635				vm_phys_set_pool(VM_FREEPOOL_DEFAULT, m, 0);
1636#if VM_NRESERVLEVEL > 0
1637			else if (!vm_reserv_reactivate_page(m))
1638#else
1639			else
1640#endif
1641				panic("vm_page_alloc: cache page %p is missing"
1642				    " from the free queue", m);
1643		} else if ((req & VM_ALLOC_IFCACHED) != 0) {
1644			mtx_unlock(&vm_page_queue_free_mtx);
1645			return (NULL);
1646#if VM_NRESERVLEVEL > 0
1647		} else if (object == NULL || (object->flags & (OBJ_COLORED |
1648		    OBJ_FICTITIOUS)) != OBJ_COLORED || (m =
1649		    vm_reserv_alloc_page(object, pindex, mpred)) == NULL) {
1650#else
1651		} else {
1652#endif
1653			m = vm_phys_alloc_pages(object != NULL ?
1654			    VM_FREEPOOL_DEFAULT : VM_FREEPOOL_DIRECT, 0);
1655#if VM_NRESERVLEVEL > 0
1656			if (m == NULL && vm_reserv_reclaim_inactive()) {
1657				m = vm_phys_alloc_pages(object != NULL ?
1658				    VM_FREEPOOL_DEFAULT : VM_FREEPOOL_DIRECT,
1659				    0);
1660			}
1661#endif
1662		}
1663	} else {
1664		/*
1665		 * Not allocatable, give up.
1666		 */
1667		mtx_unlock(&vm_page_queue_free_mtx);
1668		atomic_add_int(&vm_pageout_deficit,
1669		    max((u_int)req >> VM_ALLOC_COUNT_SHIFT, 1));
1670		pagedaemon_wakeup();
1671		return (NULL);
1672	}
1673
1674	/*
1675	 *  At this point we had better have found a good page.
1676	 */
1677	KASSERT(m != NULL, ("vm_page_alloc: missing page"));
1678	KASSERT(m->queue == PQ_NONE,
1679	    ("vm_page_alloc: page %p has unexpected queue %d", m, m->queue));
1680	KASSERT(m->wire_count == 0, ("vm_page_alloc: page %p is wired", m));
1681	KASSERT(m->hold_count == 0, ("vm_page_alloc: page %p is held", m));
1682	KASSERT(!vm_page_sbusied(m),
1683	    ("vm_page_alloc: page %p is busy", m));
1684	KASSERT(m->dirty == 0, ("vm_page_alloc: page %p is dirty", m));
1685	KASSERT(pmap_page_get_memattr(m) == VM_MEMATTR_DEFAULT,
1686	    ("vm_page_alloc: page %p has unexpected memattr %d", m,
1687	    pmap_page_get_memattr(m)));
1688	if ((m->flags & PG_CACHED) != 0) {
1689		KASSERT((m->flags & PG_ZERO) == 0,
1690		    ("vm_page_alloc: cached page %p is PG_ZERO", m));
1691		KASSERT(m->valid != 0,
1692		    ("vm_page_alloc: cached page %p is invalid", m));
1693		if (m->object == object && m->pindex == pindex)
1694			vm_cnt.v_reactivated++;
1695		else
1696			m->valid = 0;
1697		m_object = m->object;
1698		vm_page_cache_remove(m);
1699		if (m_object->type == OBJT_VNODE &&
1700		    vm_object_cache_is_empty(m_object))
1701			vp = m_object->handle;
1702	} else {
1703		KASSERT(m->valid == 0,
1704		    ("vm_page_alloc: free page %p is valid", m));
1705		vm_phys_freecnt_adj(m, -1);
1706		if ((m->flags & PG_ZERO) != 0)
1707			vm_page_zero_count--;
1708	}
1709	mtx_unlock(&vm_page_queue_free_mtx);
1710
1711	/*
1712	 * Initialize the page.  Only the PG_ZERO flag is inherited.
1713	 */
1714	flags = 0;
1715	if ((req & VM_ALLOC_ZERO) != 0)
1716		flags = PG_ZERO;
1717	flags &= m->flags;
1718	if ((req & VM_ALLOC_NODUMP) != 0)
1719		flags |= PG_NODUMP;
1720	m->flags = flags;
1721	m->aflags = 0;
1722	m->oflags = object == NULL || (object->flags & OBJ_UNMANAGED) != 0 ?
1723	    VPO_UNMANAGED : 0;
1724	m->busy_lock = VPB_UNBUSIED;
1725	if ((req & (VM_ALLOC_NOBUSY | VM_ALLOC_NOOBJ | VM_ALLOC_SBUSY)) == 0)
1726		m->busy_lock = VPB_SINGLE_EXCLUSIVER;
1727	if ((req & VM_ALLOC_SBUSY) != 0)
1728		m->busy_lock = VPB_SHARERS_WORD(1);
1729	if (req & VM_ALLOC_WIRED) {
1730		/*
1731		 * The page lock is not required for wiring a page until that
1732		 * page is inserted into the object.
1733		 */
1734		atomic_add_int(&vm_cnt.v_wire_count, 1);
1735		m->wire_count = 1;
1736	}
1737	m->act_count = 0;
1738
1739	if (object != NULL) {
1740		if (vm_page_insert_after(m, object, pindex, mpred)) {
1741			/* See the comment below about hold count. */
1742			if (vp != NULL)
1743				vdrop(vp);
1744			pagedaemon_wakeup();
1745			if (req & VM_ALLOC_WIRED) {
1746				atomic_subtract_int(&vm_cnt.v_wire_count, 1);
1747				m->wire_count = 0;
1748			}
1749			m->object = NULL;
1750			m->oflags = VPO_UNMANAGED;
1751			vm_page_free(m);
1752			return (NULL);
1753		}
1754
1755		/* Ignore device objects; the pager sets "memattr" for them. */
1756		if (object->memattr != VM_MEMATTR_DEFAULT &&
1757		    (object->flags & OBJ_FICTITIOUS) == 0)
1758			pmap_page_set_memattr(m, object->memattr);
1759	} else
1760		m->pindex = pindex;
1761
1762	/*
1763	 * The following call to vdrop() must come after the above call
1764	 * to vm_page_insert() in case both affect the same object and
1765	 * vnode.  Otherwise, the affected vnode's hold count could
1766	 * temporarily become zero.
1767	 */
1768	if (vp != NULL)
1769		vdrop(vp);
1770
1771	/*
1772	 * Don't wakeup too often - wakeup the pageout daemon when
1773	 * we would be nearly out of memory.
1774	 */
1775	if (vm_paging_needed())
1776		pagedaemon_wakeup();
1777
1778	return (m);
1779}
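
/*
 * A minimal sketch of a sleepable allocation loop (illustrative only;
 * "obj" and "pindex" are placeholders): callers that may sleep typically
 * drop the object lock, wait for the page daemon, and retry.
 */
#if 0
	while ((m = vm_page_alloc(obj, pindex, VM_ALLOC_NORMAL)) == NULL) {
		VM_OBJECT_WUNLOCK(obj);
		VM_WAIT;
		VM_OBJECT_WLOCK(obj);
	}
#endif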
1780
1781static void
1782vm_page_alloc_contig_vdrop(struct spglist *lst)
1783{
1784
1785	while (!SLIST_EMPTY(lst)) {
1786		vdrop((struct vnode *)SLIST_FIRST(lst)->plinks.s.pv);
1787		SLIST_REMOVE_HEAD(lst, plinks.s.ss);
1788	}
1789}
1790
1791/*
1792 *	vm_page_alloc_contig:
1793 *
1794 *	Allocate a contiguous set of physical pages of the given size "npages"
1795 *	from the free lists.  All of the physical pages must be at or above
1796 *	the given physical address "low" and below the given physical address
1797 *	"high".  The given value "alignment" determines the alignment of the
1798 *	first physical page in the set.  If the given value "boundary" is
1799 *	non-zero, then the set of physical pages cannot cross any physical
1800 *	address boundary that is a multiple of that value.  Both "alignment"
1801 *	and "boundary" must be a power of two.
1802 *
1803 *	If the specified memory attribute, "memattr", is VM_MEMATTR_DEFAULT,
1804 *	then the memory attribute setting for the physical pages is configured
1805 *	to the object's memory attribute setting.  Otherwise, the memory
1806 *	attribute setting for the physical pages is configured to "memattr",
1807 *	overriding the object's memory attribute setting.  However, if the
1808 *	object's memory attribute setting is not VM_MEMATTR_DEFAULT, then the
1809 *	memory attribute setting for the physical pages cannot be configured
1810 *	to VM_MEMATTR_DEFAULT.
1811 *
1812 *	The caller must always specify an allocation class.
1813 *
1814 *	allocation classes:
1815 *	VM_ALLOC_NORMAL		normal process request
1816 *	VM_ALLOC_SYSTEM		system *really* needs a page
1817 *	VM_ALLOC_INTERRUPT	interrupt time request
1818 *
1819 *	optional allocation flags:
1820 *	VM_ALLOC_NOBUSY		do not exclusive busy the page
1821 *	VM_ALLOC_NODUMP		do not include the page in a kernel core dump
1822 *	VM_ALLOC_NOOBJ		page is not associated with an object and
1823 *				should not be exclusive busy
1824 *	VM_ALLOC_SBUSY		shared busy the allocated page
1825 *	VM_ALLOC_WIRED		wire the allocated page
1826 *	VM_ALLOC_ZERO		prefer a zeroed page
1827 *
1828 *	This routine may not sleep.
1829 */
1830vm_page_t
1831vm_page_alloc_contig(vm_object_t object, vm_pindex_t pindex, int req,
1832    u_long npages, vm_paddr_t low, vm_paddr_t high, u_long alignment,
1833    vm_paddr_t boundary, vm_memattr_t memattr)
1834{
1835	struct vnode *drop;
1836	struct spglist deferred_vdrop_list;
1837	vm_page_t m, m_tmp, m_ret;
1838	u_int flags;
1839	int req_class;
1840
1841	KASSERT((object != NULL) == ((req & VM_ALLOC_NOOBJ) == 0) &&
1842	    (object != NULL || (req & VM_ALLOC_SBUSY) == 0) &&
1843	    ((req & (VM_ALLOC_NOBUSY | VM_ALLOC_SBUSY)) !=
1844	    (VM_ALLOC_NOBUSY | VM_ALLOC_SBUSY)),
1845	    ("vm_page_alloc: inconsistent object(%p)/req(%x)", (void *)object,
1846	    req));
1847	if (object != NULL) {
1848		VM_OBJECT_ASSERT_WLOCKED(object);
1849		KASSERT(object->type == OBJT_PHYS,
1850		    ("vm_page_alloc_contig: object %p isn't OBJT_PHYS",
1851		    object));
1852	}
1853	KASSERT(npages > 0, ("vm_page_alloc_contig: npages is zero"));
1854	req_class = req & VM_ALLOC_CLASS_MASK;
1855
1856	/*
1857	 * The page daemon is allowed to dig deeper into the free page list.
1858	 */
1859	if (curproc == pageproc && req_class != VM_ALLOC_INTERRUPT)
1860		req_class = VM_ALLOC_SYSTEM;
1861
1862	SLIST_INIT(&deferred_vdrop_list);
1863	mtx_lock(&vm_page_queue_free_mtx);
1864	if (vm_cnt.v_free_count + vm_cnt.v_cache_count >= npages +
1865	    vm_cnt.v_free_reserved || (req_class == VM_ALLOC_SYSTEM &&
1866	    vm_cnt.v_free_count + vm_cnt.v_cache_count >= npages +
1867	    vm_cnt.v_interrupt_free_min) || (req_class == VM_ALLOC_INTERRUPT &&
1868	    vm_cnt.v_free_count + vm_cnt.v_cache_count >= npages)) {
1869#if VM_NRESERVLEVEL > 0
1870retry:
1871		if (object == NULL || (object->flags & OBJ_COLORED) == 0 ||
1872		    (m_ret = vm_reserv_alloc_contig(object, pindex, npages,
1873		    low, high, alignment, boundary)) == NULL)
1874#endif
1875			m_ret = vm_phys_alloc_contig(npages, low, high,
1876			    alignment, boundary);
1877	} else {
1878		mtx_unlock(&vm_page_queue_free_mtx);
1879		atomic_add_int(&vm_pageout_deficit, npages);
1880		pagedaemon_wakeup();
1881		return (NULL);
1882	}
1883	if (m_ret != NULL)
1884		for (m = m_ret; m < &m_ret[npages]; m++) {
1885			drop = vm_page_alloc_init(m);
1886			if (drop != NULL) {
1887				/*
1888				 * Enqueue the vnode for deferred vdrop().
1889				 */
1890				m->plinks.s.pv = drop;
1891				SLIST_INSERT_HEAD(&deferred_vdrop_list, m,
1892				    plinks.s.ss);
1893			}
1894		}
1895	else {
1896#if VM_NRESERVLEVEL > 0
1897		if (vm_reserv_reclaim_contig(npages, low, high, alignment,
1898		    boundary))
1899			goto retry;
1900#endif
1901	}
1902	mtx_unlock(&vm_page_queue_free_mtx);
1903	if (m_ret == NULL)
1904		return (NULL);
1905
1906	/*
1907	 * Initialize the pages.  Only the PG_ZERO flag is inherited.
1908	 */
1909	flags = 0;
1910	if ((req & VM_ALLOC_ZERO) != 0)
1911		flags = PG_ZERO;
1912	if ((req & VM_ALLOC_NODUMP) != 0)
1913		flags |= PG_NODUMP;
1914	if ((req & VM_ALLOC_WIRED) != 0)
1915		atomic_add_int(&vm_cnt.v_wire_count, npages);
1916	if (object != NULL) {
1917		if (object->memattr != VM_MEMATTR_DEFAULT &&
1918		    memattr == VM_MEMATTR_DEFAULT)
1919			memattr = object->memattr;
1920	}
1921	for (m = m_ret; m < &m_ret[npages]; m++) {
1922		m->aflags = 0;
1923		m->flags = (m->flags | PG_NODUMP) & flags;
1924		m->busy_lock = VPB_UNBUSIED;
1925		if (object != NULL) {
1926			if ((req & (VM_ALLOC_NOBUSY | VM_ALLOC_SBUSY)) == 0)
1927				m->busy_lock = VPB_SINGLE_EXCLUSIVER;
1928			if ((req & VM_ALLOC_SBUSY) != 0)
1929				m->busy_lock = VPB_SHARERS_WORD(1);
1930		}
1931		if ((req & VM_ALLOC_WIRED) != 0)
1932			m->wire_count = 1;
1933		/* Unmanaged pages don't use "act_count". */
1934		m->oflags = VPO_UNMANAGED;
1935		if (object != NULL) {
1936			if (vm_page_insert(m, object, pindex)) {
1937				vm_page_alloc_contig_vdrop(
1938				    &deferred_vdrop_list);
1939				if (vm_paging_needed())
1940					pagedaemon_wakeup();
1941				if ((req & VM_ALLOC_WIRED) != 0)
1942					atomic_subtract_int(&vm_cnt.v_wire_count,
1943					    npages);
1944				for (m_tmp = m, m = m_ret;
1945				    m < &m_ret[npages]; m++) {
1946					if ((req & VM_ALLOC_WIRED) != 0)
1947						m->wire_count = 0;
1948					if (m >= m_tmp)
1949						m->object = NULL;
1950					vm_page_free(m);
1951				}
1952				return (NULL);
1953			}
1954		} else
1955			m->pindex = pindex;
1956		if (memattr != VM_MEMATTR_DEFAULT)
1957			pmap_page_set_memattr(m, memattr);
1958		pindex++;
1959	}
1960	vm_page_alloc_contig_vdrop(&deferred_vdrop_list);
1961	if (vm_paging_needed())
1962		pagedaemon_wakeup();
1963	return (m_ret);
1964}
1965
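/*
 * Usage sketch (hypothetical caller; the flag combination and sizes are
 * illustrative only): a driver needing four wired, preferably zeroed,
 * physically contiguous pages below 4GB, not tied to any object, might
 * request:
 *
 *	m = vm_page_alloc_contig(NULL, 0, VM_ALLOC_NORMAL | VM_ALLOC_NOOBJ |
 *	    VM_ALLOC_WIRED | VM_ALLOC_ZERO, 4, 0, (vm_paddr_t)1 << 32,
 *	    PAGE_SIZE, 0, VM_MEMATTR_DEFAULT);
 *
 * Since VM_ALLOC_ZERO only expresses a preference, the caller must still
 * test PG_ZERO on each returned page and zero it when the flag is clear.
 */
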
1966/*
1967 * Initialize a page that has been freshly dequeued from a freelist.
1968 * The caller has to drop the vnode returned, if it is not NULL.
1969 *
1970 * This function may only be used to initialize unmanaged pages.
1971 *
1972 * To be called with vm_page_queue_free_mtx held.
1973 */
1974static struct vnode *
1975vm_page_alloc_init(vm_page_t m)
1976{
1977	struct vnode *drop;
1978	vm_object_t m_object;
1979
1980	KASSERT(m->queue == PQ_NONE,
1981	    ("vm_page_alloc_init: page %p has unexpected queue %d",
1982	    m, m->queue));
1983	KASSERT(m->wire_count == 0,
1984	    ("vm_page_alloc_init: page %p is wired", m));
1985	KASSERT(m->hold_count == 0,
1986	    ("vm_page_alloc_init: page %p is held", m));
1987	KASSERT(!vm_page_sbusied(m),
1988	    ("vm_page_alloc_init: page %p is busy", m));
1989	KASSERT(m->dirty == 0,
1990	    ("vm_page_alloc_init: page %p is dirty", m));
1991	KASSERT(pmap_page_get_memattr(m) == VM_MEMATTR_DEFAULT,
1992	    ("vm_page_alloc_init: page %p has unexpected memattr %d",
1993	    m, pmap_page_get_memattr(m)));
1994	mtx_assert(&vm_page_queue_free_mtx, MA_OWNED);
1995	drop = NULL;
1996	if ((m->flags & PG_CACHED) != 0) {
1997		KASSERT((m->flags & PG_ZERO) == 0,
1998		    ("vm_page_alloc_init: cached page %p is PG_ZERO", m));
1999		m->valid = 0;
2000		m_object = m->object;
2001		vm_page_cache_remove(m);
2002		if (m_object->type == OBJT_VNODE &&
2003		    vm_object_cache_is_empty(m_object))
2004			drop = m_object->handle;
2005	} else {
2006		KASSERT(m->valid == 0,
2007		    ("vm_page_alloc_init: free page %p is valid", m));
2008		vm_phys_freecnt_adj(m, -1);
2009		if ((m->flags & PG_ZERO) != 0)
2010			vm_page_zero_count--;
2011	}
2012	return (drop);
2013}
2014
2015/*
2016 * 	vm_page_alloc_freelist:
2017 *
2018 *	Allocate a physical page from the specified free page list.
2019 *
2020 *	The caller must always specify an allocation class.
2021 *
2022 *	allocation classes:
2023 *	VM_ALLOC_NORMAL		normal process request
2024 *	VM_ALLOC_SYSTEM		system *really* needs a page
2025 *	VM_ALLOC_INTERRUPT	interrupt time request
2026 *
2027 *	optional allocation flags:
2028 *	VM_ALLOC_COUNT(number)	the number of additional pages that the caller
2029 *				intends to allocate
2030 *	VM_ALLOC_WIRED		wire the allocated page
2031 *	VM_ALLOC_ZERO		prefer a zeroed page
2032 *
2033 *	This routine may not sleep.
2034 */
2035vm_page_t
2036vm_page_alloc_freelist(int flind, int req)
2037{
2038	struct vnode *drop;
2039	vm_page_t m;
2040	u_int flags;
2041	int req_class;
2042
2043	req_class = req & VM_ALLOC_CLASS_MASK;
2044
2045	/*
2046	 * The page daemon is allowed to dig deeper into the free page list.
2047	 */
2048	if (curproc == pageproc && req_class != VM_ALLOC_INTERRUPT)
2049		req_class = VM_ALLOC_SYSTEM;
2050
2051	/*
2052	 * Do not allocate reserved pages unless the allocation class allows it.
2053	 */
2054	mtx_lock_flags(&vm_page_queue_free_mtx, MTX_RECURSE);
2055	if (vm_cnt.v_free_count + vm_cnt.v_cache_count > vm_cnt.v_free_reserved ||
2056	    (req_class == VM_ALLOC_SYSTEM &&
2057	    vm_cnt.v_free_count + vm_cnt.v_cache_count > vm_cnt.v_interrupt_free_min) ||
2058	    (req_class == VM_ALLOC_INTERRUPT &&
2059	    vm_cnt.v_free_count + vm_cnt.v_cache_count > 0))
2060		m = vm_phys_alloc_freelist_pages(flind, VM_FREEPOOL_DIRECT, 0);
2061	else {
2062		mtx_unlock(&vm_page_queue_free_mtx);
2063		atomic_add_int(&vm_pageout_deficit,
2064		    max((u_int)req >> VM_ALLOC_COUNT_SHIFT, 1));
2065		pagedaemon_wakeup();
2066		return (NULL);
2067	}
2068	if (m == NULL) {
2069		mtx_unlock(&vm_page_queue_free_mtx);
2070		return (NULL);
2071	}
2072	drop = vm_page_alloc_init(m);
2073	mtx_unlock(&vm_page_queue_free_mtx);
2074
2075	/*
2076	 * Initialize the page.  Only the PG_ZERO flag is inherited.
2077	 */
2078	m->aflags = 0;
2079	flags = 0;
2080	if ((req & VM_ALLOC_ZERO) != 0)
2081		flags = PG_ZERO;
2082	m->flags &= flags;
2083	if ((req & VM_ALLOC_WIRED) != 0) {
2084		/*
2085		 * The page lock is not required for wiring a page that does
2086		 * not belong to an object.
2087		 */
2088		atomic_add_int(&vm_cnt.v_wire_count, 1);
2089		m->wire_count = 1;
2090	}
2091	/* Unmanaged pages don't use "act_count". */
2092	m->oflags = VPO_UNMANAGED;
2093	if (drop != NULL)
2094		vdrop(drop);
2095	if (vm_paging_needed())
2096		pagedaemon_wakeup();
2097	return (m);
2098}
2099
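/*
 * Usage sketch (hypothetical; "flind" is whatever freelist index the
 * machine-dependent caller cares about): allocating one wired, preferably
 * zeroed page from a specific freelist might look like:
 *
 *	m = vm_page_alloc_freelist(flind,
 *	    VM_ALLOC_NORMAL | VM_ALLOC_WIRED | VM_ALLOC_ZERO);
 *	if (m != NULL && (m->flags & PG_ZERO) == 0)
 *		pmap_zero_page(m);
 */
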
2100#define	VPSC_ANY	0	/* No restrictions. */
2101#define	VPSC_NORESERV	1	/* Skip reservations; implies VPSC_NOSUPER. */
2102#define	VPSC_NOSUPER	2	/* Skip superpages. */
2103
2104/*
2105 *	vm_page_scan_contig:
2106 *
2107 *	Scan vm_page_array[] between the specified entries "m_start" and
2108 *	"m_end" for a run of contiguous physical pages that satisfy the
2109 *	specified conditions, and return the lowest page in the run.  The
2110 *	specified "alignment" determines the alignment of the lowest physical
2111 *	page in the run.  If the specified "boundary" is non-zero, then the
2112 *	run of physical pages cannot span a physical address that is a
2113 *	multiple of "boundary".
2114 *
2115 *	"m_end" is never dereferenced, so it need not point to a vm_page
2116 *	structure within vm_page_array[].
2117 *
2118 *	"npages" must be greater than zero.  "m_start" and "m_end" must not
2119 *	span a hole (or discontiguity) in the physical address space.  Both
2120 *	"alignment" and "boundary" must be powers of two.
2121 */
2122vm_page_t
2123vm_page_scan_contig(u_long npages, vm_page_t m_start, vm_page_t m_end,
2124    u_long alignment, vm_paddr_t boundary, int options)
2125{
2126	struct mtx *m_mtx, *new_mtx;
2127	vm_object_t object;
2128	vm_paddr_t pa;
2129	vm_page_t m, m_run;
2130#if VM_NRESERVLEVEL > 0
2131	int level;
2132#endif
2133	int m_inc, order, run_ext, run_len;
2134
2135	KASSERT(npages > 0, ("npages is 0"));
2136	KASSERT(powerof2(alignment), ("alignment is not a power of 2"));
2137	KASSERT(powerof2(boundary), ("boundary is not a power of 2"));
2138	m_run = NULL;
2139	run_len = 0;
2140	m_mtx = NULL;
2141	for (m = m_start; m < m_end && run_len < npages; m += m_inc) {
2142		KASSERT((m->flags & (PG_FICTITIOUS | PG_MARKER)) == 0,
2143		    ("page %p is PG_FICTITIOUS or PG_MARKER", m));
2144
2145		/*
2146		 * If the current page would be the start of a run, check its
2147		 * physical address against the end, alignment, and boundary
2148		 * conditions.  If it doesn't satisfy these conditions, either
2149		 * terminate the scan or advance to the next page that
2150		 * satisfies the failed condition.
2151		 */
2152		if (run_len == 0) {
2153			KASSERT(m_run == NULL, ("m_run != NULL"));
2154			if (m + npages > m_end)
2155				break;
2156			pa = VM_PAGE_TO_PHYS(m);
2157			if ((pa & (alignment - 1)) != 0) {
2158				m_inc = atop(roundup2(pa, alignment) - pa);
2159				continue;
2160			}
2161			if (((pa ^ (pa + ptoa(npages) - 1)) & ~(boundary -
2162			    1)) != 0) {
2163				m_inc = atop(roundup2(pa, boundary) - pa);
2164				continue;
2165			}
2166		} else
2167			KASSERT(m_run != NULL, ("m_run == NULL"));
2168
2169		/*
2170		 * Avoid releasing and reacquiring the same page lock.
2171		 */
2172		new_mtx = vm_page_lockptr(m);
2173		if (m_mtx != new_mtx) {
2174			if (m_mtx != NULL)
2175				mtx_unlock(m_mtx);
2176			m_mtx = new_mtx;
2177			mtx_lock(m_mtx);
2178		}
2179		m_inc = 1;
2180retry:
2181		if (m->wire_count != 0 || m->hold_count != 0)
2182			run_ext = 0;
2183#if VM_NRESERVLEVEL > 0
2184		else if ((level = vm_reserv_level(m)) >= 0 &&
2185		    (options & VPSC_NORESERV) != 0) {
2186			run_ext = 0;
2187			/* Advance to the end of the reservation. */
2188			pa = VM_PAGE_TO_PHYS(m);
2189			m_inc = atop(roundup2(pa + 1, vm_reserv_size(level)) -
2190			    pa);
2191		}
2192#endif
2193		else if ((object = m->object) != NULL) {
2194			/*
2195			 * The page is considered eligible for relocation if
2196			 * and only if it could be laundered or reclaimed by
2197			 * the page daemon.
2198			 */
2199			if (!VM_OBJECT_TRYRLOCK(object)) {
2200				mtx_unlock(m_mtx);
2201				VM_OBJECT_RLOCK(object);
2202				mtx_lock(m_mtx);
2203				if (m->object != object) {
2204					/*
2205					 * The page may have been freed.
2206					 */
2207					VM_OBJECT_RUNLOCK(object);
2208					goto retry;
2209				} else if (m->wire_count != 0 ||
2210				    m->hold_count != 0) {
2211					run_ext = 0;
2212					goto unlock;
2213				}
2214			}
2215			KASSERT((m->flags & PG_UNHOLDFREE) == 0,
2216			    ("page %p is PG_UNHOLDFREE", m));
2217			/* Don't care: PG_NODUMP, PG_WINATCFLS, PG_ZERO. */
2218			if (object->type != OBJT_DEFAULT &&
2219			    object->type != OBJT_SWAP &&
2220			    object->type != OBJT_VNODE)
2221				run_ext = 0;
2222			else if ((m->flags & PG_CACHED) != 0 ||
2223			    m != vm_page_lookup(object, m->pindex)) {
2224				/*
2225				 * The page is cached or recently converted
2226				 * from cached to free.
2227				 */
2228#if VM_NRESERVLEVEL > 0
2229				if (level >= 0) {
2230					/*
2231					 * The page is reserved.  Extend the
2232					 * current run by one page.
2233					 */
2234					run_ext = 1;
2235				} else
2236#endif
2237				if ((order = m->order) < VM_NFREEORDER) {
2238					/*
2239					 * The page is enqueued in the
2240					 * physical memory allocator's cache/
2241					 * free page queues.  Moreover, it is
2242					 * the first page in a power-of-two-
2243					 * sized run of contiguous cache/free
2244					 * pages.  Add these pages to the end
2245					 * of the current run, and jump
2246					 * ahead.
2247					 */
2248					run_ext = 1 << order;
2249					m_inc = 1 << order;
2250				} else
2251					run_ext = 0;
2252#if VM_NRESERVLEVEL > 0
2253			} else if ((options & VPSC_NOSUPER) != 0 &&
2254			    (level = vm_reserv_level_iffullpop(m)) >= 0) {
2255				run_ext = 0;
2256				/* Advance to the end of the superpage. */
2257				pa = VM_PAGE_TO_PHYS(m);
2258				m_inc = atop(roundup2(pa + 1,
2259				    vm_reserv_size(level)) - pa);
2260#endif
2261			} else if (object->memattr == VM_MEMATTR_DEFAULT &&
2262			    m->queue != PQ_NONE && !vm_page_busied(m)) {
2263				/*
2264				 * The page is allocated but eligible for
2265				 * relocation.  Extend the current run by one
2266				 * page.
2267				 */
2268				KASSERT(pmap_page_get_memattr(m) ==
2269				    VM_MEMATTR_DEFAULT,
2270				    ("page %p has an unexpected memattr", m));
2271				KASSERT((m->oflags & (VPO_SWAPINPROG |
2272				    VPO_SWAPSLEEP | VPO_UNMANAGED)) == 0,
2273				    ("page %p has unexpected oflags", m));
2274				/* Don't care: VPO_NOSYNC. */
2275				run_ext = 1;
2276			} else
2277				run_ext = 0;
2278unlock:
2279			VM_OBJECT_RUNLOCK(object);
2280#if VM_NRESERVLEVEL > 0
2281		} else if (level >= 0) {
2282			/*
2283			 * The page is reserved but not yet allocated.  In
2284			 * other words, it is still cached or free.  Extend
2285			 * the current run by one page.
2286			 */
2287			run_ext = 1;
2288#endif
2289		} else if ((order = m->order) < VM_NFREEORDER) {
2290			/*
2291			 * The page is enqueued in the physical memory
2292			 * allocator's cache/free page queues.  Moreover, it
2293			 * is the first page in a power-of-two-sized run of
2294			 * contiguous cache/free pages.  Add these pages to
2295			 * the end of the current run, and jump ahead.
2296			 */
2297			run_ext = 1 << order;
2298			m_inc = 1 << order;
2299		} else {
2300			/*
2301			 * Skip the page for one of the following reasons: (1)
2302			 * It is enqueued in the physical memory allocator's
2303			 * cache/free page queues.  However, it is not the
2304			 * first page in a run of contiguous cache/free pages.
2305			 * (This case rarely occurs because the scan is
2306			 * performed in ascending order.) (2) It is not
2307			 * reserved, and it is transitioning from free to
2308			 * allocated.  (Conversely, the transition from
2309			 * allocated to free for managed pages is blocked by
2310			 * the page lock.) (3) It is allocated but not
2311			 * contained by an object and not wired, e.g.,
2312			 * allocated by Xen's balloon driver.
2313			 */
2314			run_ext = 0;
2315		}
2316
2317		/*
2318		 * Extend or reset the current run of pages.
2319		 */
2320		if (run_ext > 0) {
2321			if (run_len == 0)
2322				m_run = m;
2323			run_len += run_ext;
2324		} else {
2325			if (run_len > 0) {
2326				m_run = NULL;
2327				run_len = 0;
2328			}
2329		}
2330	}
2331	if (m_mtx != NULL)
2332		mtx_unlock(m_mtx);
2333	if (run_len >= npages)
2334		return (m_run);
2335	return (NULL);
2336}
2337
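/*
 * Usage sketch (hypothetical; "m_seg_start", "m_seg_end", "req_class", and
 * "high" are illustrative variables): looking for a 16-page run aligned to
 * 64KB while skipping fully populated superpages might look like:
 *
 *	m_run = vm_page_scan_contig(16, m_seg_start, m_seg_end,
 *	    64 * 1024, 0, VPSC_NOSUPER);
 *	if (m_run != NULL)
 *		error = vm_page_reclaim_run(req_class, 16, m_run, high);
 *
 * where, as required above, "m_seg_start" and "m_seg_end" must not span a
 * hole in the physical address space.
 */
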
2338/*
2339 *	vm_page_reclaim_run:
2340 *
2341 *	Try to relocate each of the allocated virtual pages within the
2342 *	specified run of physical pages to a new physical address.  Free the
2343 *	physical pages underlying the relocated virtual pages.  A virtual page
2344 *	is relocatable if and only if it could be laundered or reclaimed by
2345 *	the page daemon.  Whenever possible, a virtual page is relocated to a
2346 *	physical address above "high".
2347 *
2348 *	Returns 0 if every physical page within the run was already free or
2349 *	just freed by a successful relocation.  Otherwise, returns a non-zero
2350 *	value indicating why the last attempt to relocate a virtual page was
2351 *	unsuccessful.
2352 *
2353 *	"req_class" must be an allocation class.
2354 */
2355static int
2356vm_page_reclaim_run(int req_class, u_long npages, vm_page_t m_run,
2357    vm_paddr_t high)
2358{
2359	struct mtx *m_mtx, *new_mtx;
2360	struct spglist free;
2361	vm_object_t object;
2362	vm_paddr_t pa;
2363	vm_page_t m, m_end, m_new;
2364	int error, order, req;
2365
2366	KASSERT((req_class & VM_ALLOC_CLASS_MASK) == req_class,
2367	    ("req_class is not an allocation class"));
2368	SLIST_INIT(&free);
2369	error = 0;
2370	m = m_run;
2371	m_end = m_run + npages;
2372	m_mtx = NULL;
2373	for (; error == 0 && m < m_end; m++) {
2374		KASSERT((m->flags & (PG_FICTITIOUS | PG_MARKER)) == 0,
2375		    ("page %p is PG_FICTITIOUS or PG_MARKER", m));
2376
2377		/*
2378		 * Avoid releasing and reacquiring the same page lock.
2379		 */
2380		new_mtx = vm_page_lockptr(m);
2381		if (m_mtx != new_mtx) {
2382			if (m_mtx != NULL)
2383				mtx_unlock(m_mtx);
2384			m_mtx = new_mtx;
2385			mtx_lock(m_mtx);
2386		}
2387retry:
2388		if (m->wire_count != 0 || m->hold_count != 0)
2389			error = EBUSY;
2390		else if ((object = m->object) != NULL) {
2391			/*
2392			 * The page is relocated if and only if it could be
2393			 * laundered or reclaimed by the page daemon.
2394			 */
2395			if (!VM_OBJECT_TRYWLOCK(object)) {
2396				mtx_unlock(m_mtx);
2397				VM_OBJECT_WLOCK(object);
2398				mtx_lock(m_mtx);
2399				if (m->object != object) {
2400					/*
2401					 * The page may have been freed.
2402					 */
2403					VM_OBJECT_WUNLOCK(object);
2404					goto retry;
2405				} else if (m->wire_count != 0 ||
2406				    m->hold_count != 0) {
2407					error = EBUSY;
2408					goto unlock;
2409				}
2410			}
2411			KASSERT((m->flags & PG_UNHOLDFREE) == 0,
2412			    ("page %p is PG_UNHOLDFREE", m));
2413			/* Don't care: PG_NODUMP, PG_WINATCFLS, PG_ZERO. */
2414			if (object->type != OBJT_DEFAULT &&
2415			    object->type != OBJT_SWAP &&
2416			    object->type != OBJT_VNODE)
2417				error = EINVAL;
2418			else if ((m->flags & PG_CACHED) != 0 ||
2419			    m != vm_page_lookup(object, m->pindex)) {
2420				/*
2421				 * The page is cached or recently converted
2422				 * from cached to free.
2423				 */
2424				VM_OBJECT_WUNLOCK(object);
2425				goto cached;
2426			} else if (object->memattr != VM_MEMATTR_DEFAULT)
2427				error = EINVAL;
2428			else if (m->queue != PQ_NONE && !vm_page_busied(m)) {
2429				KASSERT(pmap_page_get_memattr(m) ==
2430				    VM_MEMATTR_DEFAULT,
2431				    ("page %p has an unexpected memattr", m));
2432				KASSERT((m->oflags & (VPO_SWAPINPROG |
2433				    VPO_SWAPSLEEP | VPO_UNMANAGED)) == 0,
2434				    ("page %p has unexpected oflags", m));
2435				/* Don't care: VPO_NOSYNC. */
2436				if (m->valid != 0) {
2437					/*
2438					 * First, try to allocate a new page
2439					 * that is above "high".  Failing
2440					 * that, try to allocate a new page
2441					 * that is below "m_run".  Allocate
2442					 * the new page between the end of
2443					 * "m_run" and "high" only as a last
2444					 * resort.
2445					 */
2446					req = req_class | VM_ALLOC_NOOBJ;
2447					if ((m->flags & PG_NODUMP) != 0)
2448						req |= VM_ALLOC_NODUMP;
2449					if (trunc_page(high) !=
2450					    ~(vm_paddr_t)PAGE_MASK) {
2451						m_new = vm_page_alloc_contig(
2452						    NULL, 0, req, 1,
2453						    round_page(high),
2454						    ~(vm_paddr_t)0,
2455						    PAGE_SIZE, 0,
2456						    VM_MEMATTR_DEFAULT);
2457					} else
2458						m_new = NULL;
2459					if (m_new == NULL) {
2460						pa = VM_PAGE_TO_PHYS(m_run);
2461						m_new = vm_page_alloc_contig(
2462						    NULL, 0, req, 1,
2463						    0, pa - 1, PAGE_SIZE, 0,
2464						    VM_MEMATTR_DEFAULT);
2465					}
2466					if (m_new == NULL) {
2467						pa += ptoa(npages);
2468						m_new = vm_page_alloc_contig(
2469						    NULL, 0, req, 1,
2470						    pa, high, PAGE_SIZE, 0,
2471						    VM_MEMATTR_DEFAULT);
2472					}
2473					if (m_new == NULL) {
2474						error = ENOMEM;
2475						goto unlock;
2476					}
2477					KASSERT(m_new->wire_count == 0,
2478					    ("page %p is wired", m_new));
2479
2480					/*
2481					 * Replace "m" with the new page.  For
2482					 * vm_page_replace(), "m" must be busy
2483					 * and dequeued.  Finally, change "m"
2484					 * as if vm_page_free() was called.
2485					 */
2486					if (object->ref_count != 0)
2487						pmap_remove_all(m);
2488					m_new->aflags = m->aflags;
2489					KASSERT(m_new->oflags == VPO_UNMANAGED,
2490					    ("page %p is managed", m_new));
2491					m_new->oflags = m->oflags & VPO_NOSYNC;
2492					pmap_copy_page(m, m_new);
2493					m_new->valid = m->valid;
2494					m_new->dirty = m->dirty;
2495					m->flags &= ~PG_ZERO;
2496					vm_page_xbusy(m);
2497					vm_page_remque(m);
2498					vm_page_replace_checked(m_new, object,
2499					    m->pindex, m);
2500					m->valid = 0;
2501					vm_page_undirty(m);
2502
2503					/*
2504					 * The new page must be deactivated
2505					 * before the object is unlocked.
2506					 */
2507					new_mtx = vm_page_lockptr(m_new);
2508					if (m_mtx != new_mtx) {
2509						mtx_unlock(m_mtx);
2510						m_mtx = new_mtx;
2511						mtx_lock(m_mtx);
2512					}
2513					vm_page_deactivate(m_new);
2514				} else {
2515					m->flags &= ~PG_ZERO;
2516					vm_page_remque(m);
2517					vm_page_remove(m);
2518					KASSERT(m->dirty == 0,
2519					    ("page %p is dirty", m));
2520				}
2521				SLIST_INSERT_HEAD(&free, m, plinks.s.ss);
2522			} else
2523				error = EBUSY;
2524unlock:
2525			VM_OBJECT_WUNLOCK(object);
2526		} else {
2527cached:
2528			mtx_lock(&vm_page_queue_free_mtx);
2529			order = m->order;
2530			if (order < VM_NFREEORDER) {
2531				/*
2532				 * The page is enqueued in the physical memory
2533				 * allocator's cache/free page queues.
2534				 * Moreover, it is the first page in a power-
2535				 * of-two-sized run of contiguous cache/free
2536				 * pages.  Jump ahead to the last page within
2537				 * that run, and continue from there.
2538				 */
2539				m += (1 << order) - 1;
2540			}
2541#if VM_NRESERVLEVEL > 0
2542			else if (vm_reserv_is_page_free(m))
2543				order = 0;
2544#endif
2545			mtx_unlock(&vm_page_queue_free_mtx);
2546			if (order == VM_NFREEORDER)
2547				error = EINVAL;
2548		}
2549	}
2550	if (m_mtx != NULL)
2551		mtx_unlock(m_mtx);
2552	if ((m = SLIST_FIRST(&free)) != NULL) {
2553		mtx_lock(&vm_page_queue_free_mtx);
2554		do {
2555			SLIST_REMOVE_HEAD(&free, plinks.s.ss);
2556			vm_phys_freecnt_adj(m, 1);
2557#if VM_NRESERVLEVEL > 0
2558			if (!vm_reserv_free_page(m))
2559#else
2560			if (true)
2561#endif
2562				vm_phys_free_pages(m, 0);
2563		} while ((m = SLIST_FIRST(&free)) != NULL);
2564		vm_page_zero_idle_wakeup();
2565		vm_page_free_wakeup();
2566		mtx_unlock(&vm_page_queue_free_mtx);
2567	}
2568	return (error);
2569}
2570
2571#define	NRUNS	16
2572
2573CTASSERT(powerof2(NRUNS));
2574
2575#define	RUN_INDEX(count)	((count) & (NRUNS - 1))
2576
2577#define	MIN_RECLAIM	8
2578
2579/*
2580 *	vm_page_reclaim_contig:
2581 *
2582 *	Reclaim allocated, contiguous physical memory satisfying the specified
2583 *	conditions by relocating the virtual pages using that physical memory.
2584 *	Returns true if reclamation is successful and false otherwise.  Since
2585 *	relocation requires the allocation of physical pages, reclamation may
2586 *	fail due to a shortage of cache/free pages.  When reclamation fails,
2587 *	callers are expected to perform VM_WAIT before retrying a failed
2588 *	allocation operation, e.g., vm_page_alloc_contig().
2589 *
2590 *	The caller must always specify an allocation class through "req".
2591 *
2592 *	allocation classes:
2593 *	VM_ALLOC_NORMAL		normal process request
2594 *	VM_ALLOC_SYSTEM		system *really* needs a page
2595 *	VM_ALLOC_INTERRUPT	interrupt time request
2596 *
2597 *	The optional allocation flags are ignored.
2598 *
2599 *	"npages" must be greater than zero.  Both "alignment" and "boundary"
2600 *	must be powers of two.
2601 */
2602bool
2603vm_page_reclaim_contig(int req, u_long npages, vm_paddr_t low, vm_paddr_t high,
2604    u_long alignment, vm_paddr_t boundary)
2605{
2606	vm_paddr_t curr_low;
2607	vm_page_t m_run, m_runs[NRUNS];
2608	u_long count, reclaimed;
2609	int error, i, options, req_class;
2610
2611	KASSERT(npages > 0, ("npages is 0"));
2612	KASSERT(powerof2(alignment), ("alignment is not a power of 2"));
2613	KASSERT(powerof2(boundary), ("boundary is not a power of 2"));
2614	req_class = req & VM_ALLOC_CLASS_MASK;
2615
2616	/*
2617	 * The page daemon is allowed to dig deeper into the free page list.
2618	 */
2619	if (curproc == pageproc && req_class != VM_ALLOC_INTERRUPT)
2620		req_class = VM_ALLOC_SYSTEM;
2621
2622	/*
2623	 * Return if the number of cached and free pages cannot satisfy the
2624	 * requested allocation.
2625	 */
2626	count = vm_cnt.v_free_count + vm_cnt.v_cache_count;
2627	if (count < npages + vm_cnt.v_free_reserved || (count < npages +
2628	    vm_cnt.v_interrupt_free_min && req_class == VM_ALLOC_SYSTEM) ||
2629	    (count < npages && req_class == VM_ALLOC_INTERRUPT))
2630		return (false);
2631
2632	/*
2633	 * Scan up to three times, relaxing the restrictions ("options") on
2634	 * the reclamation of reservations and superpages each time.
2635	 */
2636	for (options = VPSC_NORESERV;;) {
2637		/*
2638		 * Find the highest runs that satisfy the given constraints
2639		 * and restrictions, and record them in "m_runs".
2640		 */
2641		curr_low = low;
2642		count = 0;
2643		for (;;) {
2644			m_run = vm_phys_scan_contig(npages, curr_low, high,
2645			    alignment, boundary, options);
2646			if (m_run == NULL)
2647				break;
2648			curr_low = VM_PAGE_TO_PHYS(m_run) + ptoa(npages);
2649			m_runs[RUN_INDEX(count)] = m_run;
2650			count++;
2651		}
2652
2653		/*
2654		 * Reclaim the highest runs in LIFO (descending) order until
2655		 * the number of reclaimed pages, "reclaimed", is at least
2656		 * MIN_RECLAIM.  Reset "reclaimed" each time because each
2657		 * reclamation is idempotent, and runs will (likely) recur
2658		 * from one scan to the next as restrictions are relaxed.
2659		 */
2660		reclaimed = 0;
2661		for (i = 0; count > 0 && i < NRUNS; i++) {
2662			count--;
2663			m_run = m_runs[RUN_INDEX(count)];
2664			error = vm_page_reclaim_run(req_class, npages, m_run,
2665			    high);
2666			if (error == 0) {
2667				reclaimed += npages;
2668				if (reclaimed >= MIN_RECLAIM)
2669					return (true);
2670			}
2671		}
2672
2673		/*
2674		 * Either relax the restrictions on the next scan or return if
2675		 * the last scan had no restrictions.
2676		 */
2677		if (options == VPSC_NORESERV)
2678			options = VPSC_NOSUPER;
2679		else if (options == VPSC_NOSUPER)
2680			options = VPSC_ANY;
2681		else if (options == VPSC_ANY)
2682			return (reclaimed != 0);
2683	}
2684}
2685
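/*
 * Usage sketch (hypothetical caller; the variable names are illustrative):
 * combining vm_page_alloc_contig(), vm_page_reclaim_contig(), and VM_WAIT
 * in the manner suggested above might look like:
 *
 *	for (;;) {
 *		m = vm_page_alloc_contig(object, pindex, req, npages,
 *		    low, high, alignment, boundary, memattr);
 *		if (m != NULL)
 *			break;
 *		if (!vm_page_reclaim_contig(req, npages, low, high,
 *		    alignment, boundary))
 *			VM_WAIT;
 *	}
 *
 * Since VM_WAIT may sleep, this pattern is only usable from a context that
 * is allowed to sleep, and any object lock should be dropped around it.
 */
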
2686/*
2687 *	vm_wait:	(also see VM_WAIT macro)
2688 *
2689 *	Sleep until free pages are available for allocation.
2690 *	- Called in various places before memory allocations.
2691 */
2692void
2693vm_wait(void)
2694{
2695
2696	mtx_lock(&vm_page_queue_free_mtx);
2697	if (curproc == pageproc) {
2698		vm_pageout_pages_needed = 1;
2699		msleep(&vm_pageout_pages_needed, &vm_page_queue_free_mtx,
2700		    PDROP | PSWP, "VMWait", 0);
2701	} else {
2702		if (!vm_pages_needed) {
2703			vm_pages_needed = 1;
2704			wakeup(&vm_pages_needed);
2705		}
2706		msleep(&vm_cnt.v_free_count, &vm_page_queue_free_mtx, PDROP | PVM,
2707		    "vmwait", 0);
2708	}
2709}
2710
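/*
 * Usage sketch (hypothetical caller): the typical pattern retries a failed
 * allocation after waiting for the page daemon, dropping the object lock
 * around the sleep, e.g.:
 *
 *	while ((m = vm_page_alloc(object, pindex,
 *	    VM_ALLOC_NORMAL | VM_ALLOC_WIRED)) == NULL) {
 *		VM_OBJECT_WUNLOCK(object);
 *		VM_WAIT;
 *		VM_OBJECT_WLOCK(object);
 *	}
 */
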
2711/*
2712 *	vm_waitpfault:	(also see VM_WAITPFAULT macro)
2713 *
2714 *	Sleep until free pages are available for allocation.
2715 *	- Called only in vm_fault so that processes page faulting
2716 *	  can be easily tracked.
2717 *	- Sleeps at a lower priority than vm_wait() so that vm_wait()ing
2718 *	  processes will be able to grab memory first.  Do not change
2719 *	  this balance without careful testing first.
2720 */
2721void
2722vm_waitpfault(void)
2723{
2724
2725	mtx_lock(&vm_page_queue_free_mtx);
2726	if (!vm_pages_needed) {
2727		vm_pages_needed = 1;
2728		wakeup(&vm_pages_needed);
2729	}
2730	msleep(&vm_cnt.v_free_count, &vm_page_queue_free_mtx, PDROP | PUSER,
2731	    "pfault", 0);
2732}
2733
2734struct vm_pagequeue *
2735vm_page_pagequeue(vm_page_t m)
2736{
2737
2738	return (&vm_phys_domain(m)->vmd_pagequeues[m->queue]);
2739}
2740
2741/*
2742 *	vm_page_dequeue:
2743 *
2744 *	Remove the given page from its current page queue.
2745 *
2746 *	The page must be locked.
2747 */
2748void
2749vm_page_dequeue(vm_page_t m)
2750{
2751	struct vm_pagequeue *pq;
2752
2753	vm_page_assert_locked(m);
2754	KASSERT(m->queue < PQ_COUNT, ("vm_page_dequeue: page %p is not queued",
2755	    m));
2756	pq = vm_page_pagequeue(m);
2757	vm_pagequeue_lock(pq);
2758	m->queue = PQ_NONE;
2759	TAILQ_REMOVE(&pq->pq_pl, m, plinks.q);
2760	vm_pagequeue_cnt_dec(pq);
2761	vm_pagequeue_unlock(pq);
2762}
2763
2764/*
2765 *	vm_page_dequeue_locked:
2766 *
2767 *	Remove the given page from its current page queue.
2768 *
2769 *	The page and page queue must be locked.
2770 */
2771void
2772vm_page_dequeue_locked(vm_page_t m)
2773{
2774	struct vm_pagequeue *pq;
2775
2776	vm_page_lock_assert(m, MA_OWNED);
2777	pq = vm_page_pagequeue(m);
2778	vm_pagequeue_assert_locked(pq);
2779	m->queue = PQ_NONE;
2780	TAILQ_REMOVE(&pq->pq_pl, m, plinks.q);
2781	vm_pagequeue_cnt_dec(pq);
2782}
2783
2784/*
2785 *	vm_page_enqueue:
2786 *
2787 *	Add the given page to the specified page queue.
2788 *
2789 *	The page must be locked.
2790 */
2791static void
2792vm_page_enqueue(uint8_t queue, vm_page_t m)
2793{
2794	struct vm_pagequeue *pq;
2795
2796	vm_page_lock_assert(m, MA_OWNED);
2797	KASSERT(queue < PQ_COUNT,
2798	    ("vm_page_enqueue: invalid queue %u request for page %p",
2799	    queue, m));
2800	pq = &vm_phys_domain(m)->vmd_pagequeues[queue];
2801	vm_pagequeue_lock(pq);
2802	m->queue = queue;
2803	TAILQ_INSERT_TAIL(&pq->pq_pl, m, plinks.q);
2804	vm_pagequeue_cnt_inc(pq);
2805	vm_pagequeue_unlock(pq);
2806}
2807
2808/*
2809 *	vm_page_requeue:
2810 *
2811 *	Move the given page to the tail of its current page queue.
2812 *
2813 *	The page must be locked.
2814 */
2815void
2816vm_page_requeue(vm_page_t m)
2817{
2818	struct vm_pagequeue *pq;
2819
2820	vm_page_lock_assert(m, MA_OWNED);
2821	KASSERT(m->queue != PQ_NONE,
2822	    ("vm_page_requeue: page %p is not queued", m));
2823	pq = vm_page_pagequeue(m);
2824	vm_pagequeue_lock(pq);
2825	TAILQ_REMOVE(&pq->pq_pl, m, plinks.q);
2826	TAILQ_INSERT_TAIL(&pq->pq_pl, m, plinks.q);
2827	vm_pagequeue_unlock(pq);
2828}
2829
2830/*
2831 *	vm_page_requeue_locked:
2832 *
2833 *	Move the given page to the tail of its current page queue.
2834 *
2835 *	The page queue must be locked.
2836 */
2837void
2838vm_page_requeue_locked(vm_page_t m)
2839{
2840	struct vm_pagequeue *pq;
2841
2842	KASSERT(m->queue != PQ_NONE,
2843	    ("vm_page_requeue_locked: page %p is not queued", m));
2844	pq = vm_page_pagequeue(m);
2845	vm_pagequeue_assert_locked(pq);
2846	TAILQ_REMOVE(&pq->pq_pl, m, plinks.q);
2847	TAILQ_INSERT_TAIL(&pq->pq_pl, m, plinks.q);
2848}
2849
2850/*
2851 *	vm_page_activate:
2852 *
2853 *	Put the specified page on the active list (if appropriate).
2854 *	Ensure that act_count is at least ACT_INIT but do not otherwise
2855 *	mess with it.
2856 *
2857 *	The page must be locked.
2858 */
2859void
2860vm_page_activate(vm_page_t m)
2861{
2862	int queue;
2863
2864	vm_page_lock_assert(m, MA_OWNED);
2865	if ((queue = m->queue) != PQ_ACTIVE) {
2866		if (m->wire_count == 0 && (m->oflags & VPO_UNMANAGED) == 0) {
2867			if (m->act_count < ACT_INIT)
2868				m->act_count = ACT_INIT;
2869			if (queue != PQ_NONE)
2870				vm_page_dequeue(m);
2871			vm_page_enqueue(PQ_ACTIVE, m);
2872		} else
2873			KASSERT(queue == PQ_NONE,
2874			    ("vm_page_activate: wired page %p is queued", m));
2875	} else {
2876		if (m->act_count < ACT_INIT)
2877			m->act_count = ACT_INIT;
2878	}
2879}
2880
2881/*
2882 *	vm_page_free_wakeup:
2883 *
2884 *	Helper routine for vm_page_free_toq() and vm_page_cache().  This
2885 *	routine is called when a page has been added to the cache or free
2886 *	queues.
2887 *
2888 *	The page queues must be locked.
2889 */
2890static inline void
2891vm_page_free_wakeup(void)
2892{
2893
2894	mtx_assert(&vm_page_queue_free_mtx, MA_OWNED);
2895	/*
2896	 * if pageout daemon needs pages, then tell it that there are
2897	 * some free.
2898	 */
2899	if (vm_pageout_pages_needed &&
2900	    vm_cnt.v_cache_count + vm_cnt.v_free_count >= vm_cnt.v_pageout_free_min) {
2901		wakeup(&vm_pageout_pages_needed);
2902		vm_pageout_pages_needed = 0;
2903	}
2904	/*
2905	 * Wake up processes that are waiting on memory if we hit a
2906	 * high water mark.  And wake up the scheduler process if we
2907	 * have lots of memory; this process will swap in processes.
2908	 */
2909	if (vm_pages_needed && !vm_page_count_min()) {
2910		vm_pages_needed = 0;
2911		wakeup(&vm_cnt.v_free_count);
2912	}
2913}
2914
2915/*
2916 *	Turn a cached page into a free page, by changing its attributes.
2917 *	Keep the statistics up-to-date.
2918 *
2919 *	The free page queue must be locked.
2920 */
2921static void
2922vm_page_cache_turn_free(vm_page_t m)
2923{
2924
2925	mtx_assert(&vm_page_queue_free_mtx, MA_OWNED);
2926
2927	m->object = NULL;
2928	m->valid = 0;
2929	KASSERT((m->flags & PG_CACHED) != 0,
2930	    ("vm_page_cache_turn_free: page %p is not cached", m));
2931	m->flags &= ~PG_CACHED;
2932	vm_cnt.v_cache_count--;
2933	vm_phys_freecnt_adj(m, 1);
2934}
2935
2936/*
2937 *	vm_page_free_toq:
2938 *
2939 *	Returns the given page to the free list,
2940 *	disassociating it from any VM object.
2941 *
2942 *	The object must be locked.  The page must be locked if it is managed.
2943 */
2944void
2945vm_page_free_toq(vm_page_t m)
2946{
2947
2948	if ((m->oflags & VPO_UNMANAGED) == 0) {
2949		vm_page_lock_assert(m, MA_OWNED);
2950		KASSERT(!pmap_page_is_mapped(m),
2951		    ("vm_page_free_toq: freeing mapped page %p", m));
2952	} else
2953		KASSERT(m->queue == PQ_NONE,
2954		    ("vm_page_free_toq: unmanaged page %p is queued", m));
2955	PCPU_INC(cnt.v_tfree);
2956
2957	if (vm_page_sbusied(m))
2958		panic("vm_page_free: freeing busy page %p", m);
2959
2960	/*
2961	 * Unqueue, then remove page.  Note that we cannot destroy
2962	 * the page here because we do not want to call the pager's
2963	 * callback routine until after we've put the page on the
2964	 * appropriate free queue.
2965	 */
2966	vm_page_remque(m);
2967	vm_page_remove(m);
2968
2969	/*
2970	 * If the page is fictitious, remove the object association and
2971	 * return; otherwise, delay removal of the object association.
2972	 */
2973	if ((m->flags & PG_FICTITIOUS) != 0) {
2974		return;
2975	}
2976
2977	m->valid = 0;
2978	vm_page_undirty(m);
2979
2980	if (m->wire_count != 0)
2981		panic("vm_page_free: freeing wired page %p", m);
2982	if (m->hold_count != 0) {
2983		m->flags &= ~PG_ZERO;
2984		KASSERT((m->flags & PG_UNHOLDFREE) == 0,
2985		    ("vm_page_free: freeing PG_UNHOLDFREE page %p", m));
2986		m->flags |= PG_UNHOLDFREE;
2987	} else {
2988		/*
2989		 * Restore the default memory attribute to the page.
2990		 */
2991		if (pmap_page_get_memattr(m) != VM_MEMATTR_DEFAULT)
2992			pmap_page_set_memattr(m, VM_MEMATTR_DEFAULT);
2993
2994		/*
2995		 * Insert the page into the physical memory allocator's
2996		 * cache/free page queues.
2997		 */
2998		mtx_lock(&vm_page_queue_free_mtx);
2999		vm_phys_freecnt_adj(m, 1);
3000#if VM_NRESERVLEVEL > 0
3001		if (!vm_reserv_free_page(m))
3002#else
3003		if (TRUE)
3004#endif
3005			vm_phys_free_pages(m, 0);
3006		if ((m->flags & PG_ZERO) != 0)
3007			++vm_page_zero_count;
3008		else
3009			vm_page_zero_idle_wakeup();
3010		vm_page_free_wakeup();
3011		mtx_unlock(&vm_page_queue_free_mtx);
3012	}
3013}
3014
3015/*
3016 *	vm_page_wire:
3017 *
3018 *	Mark this page as wired down by yet
3019 *	another map, removing it from paging queues
3020 *	as necessary.
3021 *
3022 *	If the page is fictitious, then its wire count must remain one.
3023 *
3024 *	The page must be locked.
3025 */
3026void
3027vm_page_wire(vm_page_t m)
3028{
3029
3030	/*
3031	 * Only bump the wire statistics if the page is not already wired,
3032	 * and only unqueue the page if it is on some queue (if it is unmanaged
3033	 * it is already off the queues).
3034	 */
3035	vm_page_lock_assert(m, MA_OWNED);
3036	if ((m->flags & PG_FICTITIOUS) != 0) {
3037		KASSERT(m->wire_count == 1,
3038		    ("vm_page_wire: fictitious page %p's wire count isn't one",
3039		    m));
3040		return;
3041	}
3042	if (m->wire_count == 0) {
3043		KASSERT((m->oflags & VPO_UNMANAGED) == 0 ||
3044		    m->queue == PQ_NONE,
3045		    ("vm_page_wire: unmanaged page %p is queued", m));
3046		vm_page_remque(m);
3047		atomic_add_int(&vm_cnt.v_wire_count, 1);
3048	}
3049	m->wire_count++;
3050	KASSERT(m->wire_count != 0, ("vm_page_wire: wire_count overflow m=%p", m));
3051}
3052
3053/*
3054 * vm_page_unwire:
3055 *
3056 * Release one wiring of the specified page, potentially allowing it to be
3057 * paged out.  Returns TRUE if the number of wirings transitions to zero and
3058 * FALSE otherwise.
3059 *
3060 * Only managed pages belonging to an object can be paged out.  If the number
3061 * of wirings transitions to zero and the page is eligible for page out, then
3062 * the page is added to the specified paging queue (unless PQ_NONE is
3063 * specified).
3064 *
3065 * If a page is fictitious, then its wire count must always be one.
3066 *
3067 * A managed page must be locked.
3068 */
3069boolean_t
3070vm_page_unwire(vm_page_t m, uint8_t queue)
3071{
3072
3073	KASSERT(queue < PQ_COUNT || queue == PQ_NONE,
3074	    ("vm_page_unwire: invalid queue %u request for page %p",
3075	    queue, m));
3076	if ((m->oflags & VPO_UNMANAGED) == 0)
3077		vm_page_assert_locked(m);
3078	if ((m->flags & PG_FICTITIOUS) != 0) {
3079		KASSERT(m->wire_count == 1,
3080	    ("vm_page_unwire: fictitious page %p's wire count isn't one", m));
3081		return (FALSE);
3082	}
3083	if (m->wire_count > 0) {
3084		m->wire_count--;
3085		if (m->wire_count == 0) {
3086			atomic_subtract_int(&vm_cnt.v_wire_count, 1);
3087			if ((m->oflags & VPO_UNMANAGED) == 0 &&
3088			    m->object != NULL && queue != PQ_NONE) {
3089				if (queue == PQ_INACTIVE)
3090					m->flags &= ~PG_WINATCFLS;
3091				vm_page_enqueue(queue, m);
3092			}
3093			return (TRUE);
3094		} else
3095			return (FALSE);
3096	} else
3097		panic("vm_page_unwire: page %p's wire count is zero", m);
3098}
3099
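/*
 * Usage sketch (hypothetical caller): a transient wiring of a managed page
 * and its later release to the inactive queue:
 *
 *	vm_page_lock(m);
 *	vm_page_wire(m);
 *	vm_page_unlock(m);
 *	...use the page without fear of it being reclaimed...
 *	vm_page_lock(m);
 *	(void)vm_page_unwire(m, PQ_INACTIVE);
 *	vm_page_unlock(m);
 */
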
3100/*
3101 * Move the specified page to the inactive queue.
3102 *
3103 * Many pages placed on the inactive queue should actually go
3104 * into the cache, but it is difficult to figure out which.  What
3105 * we do instead, if the inactive target is well met, is to put
3106 * clean pages at the head of the inactive queue instead of the tail.
3107 * This will cause them to be moved to the cache more quickly and
3108 * if not actively re-referenced, reclaimed more quickly.  If we just
3109 * stick these pages at the end of the inactive queue, heavy filesystem
3110 * meta-data accesses can cause an unnecessary paging load on memory bound
3111 * processes.  This optimization causes one-time-use metadata to be
3112 * reused more quickly.
3113 *
3114 * Normally noreuse is FALSE, resulting in LRU operation.  noreuse is set
3115 * to TRUE if we want this page to be 'as if it were placed in the cache',
3116 * except without unmapping it from the process address space.  In
3117 * practice this is implemented by inserting the page at the head of the
3118 * queue, using a marker page to guide FIFO insertion ordering.
3119 *
3120 * The page must be locked.
3121 */
3122static inline void
3123_vm_page_deactivate(vm_page_t m, boolean_t noreuse)
3124{
3125	struct vm_pagequeue *pq;
3126	int queue;
3127
3128	vm_page_assert_locked(m);
3129
3130	/*
3131	 * Ignore if the page is already inactive, unless it is unlikely to be
3132	 * reactivated.
3133	 */
3134	if ((queue = m->queue) == PQ_INACTIVE && !noreuse)
3135		return;
3136	if (m->wire_count == 0 && (m->oflags & VPO_UNMANAGED) == 0) {
3137		pq = &vm_phys_domain(m)->vmd_pagequeues[PQ_INACTIVE];
3138		/* Avoid multiple acquisitions of the inactive queue lock. */
3139		if (queue == PQ_INACTIVE) {
3140			vm_pagequeue_lock(pq);
3141			vm_page_dequeue_locked(m);
3142		} else {
3143			if (queue != PQ_NONE)
3144				vm_page_dequeue(m);
3145			m->flags &= ~PG_WINATCFLS;
3146			vm_pagequeue_lock(pq);
3147		}
3148		m->queue = PQ_INACTIVE;
3149		if (noreuse)
3150			TAILQ_INSERT_BEFORE(&vm_phys_domain(m)->vmd_inacthead,
3151			    m, plinks.q);
3152		else
3153			TAILQ_INSERT_TAIL(&pq->pq_pl, m, plinks.q);
3154		vm_pagequeue_cnt_inc(pq);
3155		vm_pagequeue_unlock(pq);
3156	}
3157}
3158
3159/*
3160 * Move the specified page to the inactive queue.
3161 *
3162 * The page must be locked.
3163 */
3164void
3165vm_page_deactivate(vm_page_t m)
3166{
3167
3168	_vm_page_deactivate(m, FALSE);
3169}
3170
3171/*
3172 * Move the specified page to the inactive queue with the expectation
3173 * that it is unlikely to be reused.
3174 *
3175 * The page must be locked.
3176 */
3177void
3178vm_page_deactivate_noreuse(vm_page_t m)
3179{
3180
3181	_vm_page_deactivate(m, TRUE);
3182}
3183
3184/*
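/*
 * Usage sketch (hypothetical caller): after a one-time use of a page that
 * is not expected to be referenced again, the hint can be given as:
 *
 *	vm_page_lock(m);
 *	vm_page_deactivate_noreuse(m);
 *	vm_page_unlock(m);
 */
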
3185 * vm_page_try_to_cache:
3186 *
3187 * Returns 0 on failure, 1 on success
3188 */
3189int
3190vm_page_try_to_cache(vm_page_t m)
3191{
3192
3193	vm_page_lock_assert(m, MA_OWNED);
3194	VM_OBJECT_ASSERT_WLOCKED(m->object);
3195	if (m->dirty || m->hold_count || m->wire_count ||
3196	    (m->oflags & VPO_UNMANAGED) != 0 || vm_page_busied(m))
3197		return (0);
3198	pmap_remove_all(m);
3199	if (m->dirty)
3200		return (0);
3201	vm_page_cache(m);
3202	return (1);
3203}
3204
3205/*
3206 * vm_page_try_to_free()
3207 *
3208 *	Attempt to free the page.  If we cannot free it, we do nothing.
3209 *	1 is returned on success, 0 on failure.
3210 */
3211int
3212vm_page_try_to_free(vm_page_t m)
3213{
3214
3215	vm_page_lock_assert(m, MA_OWNED);
3216	if (m->object != NULL)
3217		VM_OBJECT_ASSERT_WLOCKED(m->object);
3218	if (m->dirty || m->hold_count || m->wire_count ||
3219	    (m->oflags & VPO_UNMANAGED) != 0 || vm_page_busied(m))
3220		return (0);
3221	pmap_remove_all(m);
3222	if (m->dirty)
3223		return (0);
3224	vm_page_free(m);
3225	return (1);
3226}
3227
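/*
 * Usage sketch (hypothetical caller): with both the page and its object
 * write-locked, a page can be freed opportunistically and otherwise
 * deactivated:
 *
 *	vm_page_lock(m);
 *	if (!vm_page_try_to_free(m))
 *		vm_page_deactivate(m);
 *	vm_page_unlock(m);
 */
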
3228/*
3229 * vm_page_cache
3230 *
3231 * Put the specified page onto the page cache queue (if appropriate).
3232 *
3233 * The object and page must be locked.
3234 */
3235void
3236vm_page_cache(vm_page_t m)
3237{
3238	vm_object_t object;
3239	boolean_t cache_was_empty;
3240
3241	vm_page_lock_assert(m, MA_OWNED);
3242	object = m->object;
3243	VM_OBJECT_ASSERT_WLOCKED(object);
3244	if (vm_page_busied(m) || (m->oflags & VPO_UNMANAGED) ||
3245	    m->hold_count || m->wire_count)
3246		panic("vm_page_cache: attempting to cache busy page");
3247	KASSERT(!pmap_page_is_mapped(m),
3248	    ("vm_page_cache: page %p is mapped", m));
3249	KASSERT(m->dirty == 0, ("vm_page_cache: page %p is dirty", m));
3250	if (m->valid == 0 || object->type == OBJT_DEFAULT ||
3251	    (object->type == OBJT_SWAP &&
3252	    !vm_pager_has_page(object, m->pindex, NULL, NULL))) {
3253		/*
3254		 * Hypothesis: A cache-eligible page belonging to a
3255		 * default object or swap object but without a backing
3256		 * store must be zero filled.
3257		 */
3258		vm_page_free(m);
3259		return;
3260	}
3261	KASSERT((m->flags & PG_CACHED) == 0,
3262	    ("vm_page_cache: page %p is already cached", m));
3263
3264	/*
3265	 * Remove the page from the paging queues.
3266	 */
3267	vm_page_remque(m);
3268
3269	/*
3270	 * Remove the page from the object's collection of resident
3271	 * pages.
3272	 */
3273	vm_radix_remove(&object->rtree, m->pindex);
3274	TAILQ_REMOVE(&object->memq, m, listq);
3275	object->resident_page_count--;
3276
3277	/*
3278	 * Restore the default memory attribute to the page.
3279	 */
3280	if (pmap_page_get_memattr(m) != VM_MEMATTR_DEFAULT)
3281		pmap_page_set_memattr(m, VM_MEMATTR_DEFAULT);
3282
3283	/*
3284	 * Insert the page into the object's collection of cached pages
3285	 * and the physical memory allocator's cache/free page queues.
3286	 */
3287	m->flags &= ~PG_ZERO;
3288	mtx_lock(&vm_page_queue_free_mtx);
3289	cache_was_empty = vm_radix_is_empty(&object->cache);
3290	if (vm_radix_insert(&object->cache, m)) {
3291		mtx_unlock(&vm_page_queue_free_mtx);
3292		if (object->resident_page_count == 0)
3293			vdrop(object->handle);
3294		m->object = NULL;
3295		vm_page_free(m);
3296		return;
3297	}
3298
3299	/*
3300	 * The above call to vm_radix_insert() could reclaim the one pre-
3301	 * existing cached page from this object, resulting in a call to
3302	 * vdrop().
3303	 */
3304	if (!cache_was_empty)
3305		cache_was_empty = vm_radix_is_singleton(&object->cache);
3306
3307	m->flags |= PG_CACHED;
3308	vm_cnt.v_cache_count++;
3309	PCPU_INC(cnt.v_tcached);
3310#if VM_NRESERVLEVEL > 0
3311	if (!vm_reserv_free_page(m)) {
3312#else
3313	if (TRUE) {
3314#endif
3315		vm_phys_free_pages(m, 0);
3316	}
3317	vm_page_free_wakeup();
3318	mtx_unlock(&vm_page_queue_free_mtx);
3319
3320	/*
3321	 * Increment the vnode's hold count if this is the object's only
3322	 * cached page.  Decrement the vnode's hold count if this was
3323	 * the object's only resident page.
3324	 */
3325	if (object->type == OBJT_VNODE) {
3326		if (cache_was_empty && object->resident_page_count != 0)
3327			vhold(object->handle);
3328		else if (!cache_was_empty && object->resident_page_count == 0)
3329			vdrop(object->handle);
3330	}
3331}
3332
3333/*
3334 * vm_page_advise
3335 *
3336 * 	Deactivate or do nothing, as appropriate.
3337 *
3338 *	The object and page must be locked.
3339 */
3340void
3341vm_page_advise(vm_page_t m, int advice)
3342{
3343
3344	vm_page_assert_locked(m);
3345	VM_OBJECT_ASSERT_WLOCKED(m->object);
3346	if (advice == MADV_FREE)
3347		/*
3348		 * Mark the page clean.  This will allow the page to be freed
3349		 * up by the system.  However, such pages are often reused
3350		 * quickly by malloc() so we do not do anything that would
3351		 * cause a page fault if we can help it.
3352		 *
3353		 * Specifically, we do not try to actually free the page now
3354		 * nor do we try to put it in the cache (which would cause a
3355		 * page fault on reuse).
3356		 *
3357		 * But we do make the page as freeable as we can without
3358		 * actually taking the step of unmapping it.
3359		 */
3360		m->dirty = 0;
3361	else if (advice != MADV_DONTNEED)
3362		return;
3363
3364	/*
3365	 * Clear any references to the page.  Otherwise, the page daemon will
3366	 * immediately reactivate the page.
3367	 */
3368	vm_page_aflag_clear(m, PGA_REFERENCED);
3369
3370	if (advice != MADV_FREE && m->dirty == 0 && pmap_is_modified(m))
3371		vm_page_dirty(m);
3372
3373	/*
3374	 * Place clean pages at the head of the inactive queue rather than the
3375	 * tail, thus defeating the queue's LRU operation and ensuring that the
3376	 * page will be reused quickly.
3377	 */
3378	_vm_page_deactivate(m, m->dirty == 0);
3379}
3380
3381/*
3382 * Grab a page, waiting until we are woken up due to the page
3383 * changing state.  We keep on waiting if the page continues
3384 * to be in the object.  If the page doesn't exist, first allocate it
3385 * and then conditionally zero it.
3386 *
3387 * This routine may sleep.
3388 *
3389 * The object must be locked on entry.  The lock will, however, be released
3390 * and reacquired if the routine sleeps.
3391 */
3392vm_page_t
3393vm_page_grab(vm_object_t object, vm_pindex_t pindex, int allocflags)
3394{
3395	vm_page_t m;
3396	int sleep;
3397
3398	VM_OBJECT_ASSERT_WLOCKED(object);
3399	KASSERT((allocflags & VM_ALLOC_SBUSY) == 0 ||
3400	    (allocflags & VM_ALLOC_IGN_SBUSY) != 0,
3401	    ("vm_page_grab: VM_ALLOC_SBUSY/VM_ALLOC_IGN_SBUSY mismatch"));
3402retrylookup:
3403	if ((m = vm_page_lookup(object, pindex)) != NULL) {
3404		sleep = (allocflags & VM_ALLOC_IGN_SBUSY) != 0 ?
3405		    vm_page_xbusied(m) : vm_page_busied(m);
3406		if (sleep) {
3407			if ((allocflags & VM_ALLOC_NOWAIT) != 0)
3408				return (NULL);
3409			/*
3410			 * Reference the page before unlocking and
3411			 * sleeping so that the page daemon is less
3412			 * likely to reclaim it.
3413			 */
3414			vm_page_aflag_set(m, PGA_REFERENCED);
3415			vm_page_lock(m);
3416			VM_OBJECT_WUNLOCK(object);
3417			vm_page_busy_sleep(m, "pgrbwt");
3418			VM_OBJECT_WLOCK(object);
3419			goto retrylookup;
3420		} else {
3421			if ((allocflags & VM_ALLOC_WIRED) != 0) {
3422				vm_page_lock(m);
3423				vm_page_wire(m);
3424				vm_page_unlock(m);
3425			}
3426			if ((allocflags &
3427			    (VM_ALLOC_NOBUSY | VM_ALLOC_SBUSY)) == 0)
3428				vm_page_xbusy(m);
3429			if ((allocflags & VM_ALLOC_SBUSY) != 0)
3430				vm_page_sbusy(m);
3431			return (m);
3432		}
3433	}
3434	m = vm_page_alloc(object, pindex, allocflags);
3435	if (m == NULL) {
3436		if ((allocflags & VM_ALLOC_NOWAIT) != 0)
3437			return (NULL);
3438		VM_OBJECT_WUNLOCK(object);
3439		VM_WAIT;
3440		VM_OBJECT_WLOCK(object);
3441		goto retrylookup;
3442	} else if (m->valid != 0)
3443		return (m);
3444	if (allocflags & VM_ALLOC_ZERO && (m->flags & PG_ZERO) == 0)
3445		pmap_zero_page(m);
3446	return (m);
3447}
3448
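/*
 * Usage sketch (hypothetical caller): grabbing a page at "pindex" of a
 * write-locked object, filling it if it is not already valid:
 *
 *	VM_OBJECT_WLOCK(object);
 *	m = vm_page_grab(object, pindex, VM_ALLOC_NORMAL | VM_ALLOC_ZERO);
 *	if (m->valid != VM_PAGE_BITS_ALL) {
 *		...fill or zero the page and mark it valid...
 *	}
 *	vm_page_xunbusy(m);
 *	VM_OBJECT_WUNLOCK(object);
 *
 * Note that vm_page_grab() may drop and reacquire the object lock, so any
 * state protected by that lock must be revalidated after the call.
 */
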
3449/*
3450 * Mapping function for valid or dirty bits in a page.
3451 *
3452 * Inputs are required to range within a page.
3453 */
3454vm_page_bits_t
3455vm_page_bits(int base, int size)
3456{
3457	int first_bit;
3458	int last_bit;
3459
3460	KASSERT(
3461	    base + size <= PAGE_SIZE,
3462	    ("vm_page_bits: illegal base/size %d/%d", base, size)
3463	);
3464
3465	if (size == 0)		/* handle degenerate case */
3466		return (0);
3467
3468	first_bit = base >> DEV_BSHIFT;
3469	last_bit = (base + size - 1) >> DEV_BSHIFT;
3470
3471	return (((vm_page_bits_t)2 << last_bit) -
3472	    ((vm_page_bits_t)1 << first_bit));
3473}
3474
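/*
 * Worked example (assuming DEV_BSIZE is 512 and PAGE_SIZE is 4096, so a
 * page consists of eight DEV_BSIZE chunks): vm_page_bits(0, 512) returns
 * 0x01, vm_page_bits(512, 1024) returns 0x06, and vm_page_bits(0, 4096)
 * returns 0xff, which equals VM_PAGE_BITS_ALL for this configuration.
 */
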
3475/*
3476 *	vm_page_set_valid_range:
3477 *
3478 *	Sets portions of a page valid.  The arguments are expected
3479 *	to be DEV_BSIZE aligned but if they aren't the bitmap is inclusive
3480 *	of any partial chunks touched by the range.  The invalid portion of
3481 *	such chunks will be zeroed.
3482 *
3483 *	(base + size) must be less than or equal to PAGE_SIZE.
3484 */
3485void
3486vm_page_set_valid_range(vm_page_t m, int base, int size)
3487{
3488	int endoff, frag;
3489
3490	VM_OBJECT_ASSERT_WLOCKED(m->object);
3491	if (size == 0)	/* handle degenerate case */
3492		return;
3493
3494	/*
3495	 * If the base is not DEV_BSIZE aligned and the valid
3496	 * bit is clear, we have to zero out a portion of the
3497	 * first block.
3498	 */
3499	if ((frag = base & ~(DEV_BSIZE - 1)) != base &&
3500	    (m->valid & (1 << (base >> DEV_BSHIFT))) == 0)
3501		pmap_zero_page_area(m, frag, base - frag);
3502
3503	/*
3504	 * If the ending offset is not DEV_BSIZE aligned and the
3505	 * valid bit is clear, we have to zero out a portion of
3506	 * the last block.
3507	 */
3508	endoff = base + size;
3509	if ((frag = endoff & ~(DEV_BSIZE - 1)) != endoff &&
3510	    (m->valid & (1 << (endoff >> DEV_BSHIFT))) == 0)
3511		pmap_zero_page_area(m, endoff,
3512		    DEV_BSIZE - (endoff & (DEV_BSIZE - 1)));
3513
3514	/*
3515	 * Assert that no previously invalid block that is now being validated
3516	 * is already dirty.
3517	 */
3518	KASSERT((~m->valid & vm_page_bits(base, size) & m->dirty) == 0,
3519	    ("vm_page_set_valid_range: page %p is dirty", m));
3520
3521	/*
3522	 * Set valid bits inclusive of any overlap.
3523	 */
3524	m->valid |= vm_page_bits(base, size);
3525}
3526
3527/*
3528 * Clear the given bits from the specified page's dirty field.
3529 */
3530static __inline void
3531vm_page_clear_dirty_mask(vm_page_t m, vm_page_bits_t pagebits)
3532{
3533	uintptr_t addr;
3534#if PAGE_SIZE < 16384
3535	int shift;
3536#endif
3537
3538	/*
3539	 * If the object is locked and the page is neither exclusive busy nor
3540	 * write mapped, then the page's dirty field cannot possibly be
3541	 * set by a concurrent pmap operation.
3542	 */
3543	VM_OBJECT_ASSERT_WLOCKED(m->object);
3544	if (!vm_page_xbusied(m) && !pmap_page_is_write_mapped(m))
3545		m->dirty &= ~pagebits;
3546	else {
3547		/*
3548		 * The pmap layer can call vm_page_dirty() without
3549		 * holding a distinguished lock.  The combination of
3550		 * the object's lock and an atomic operation suffice
3551		 * to guarantee consistency of the page dirty field.
3552		 *
3553		 * For PAGE_SIZE == 32768 case, compiler already
3554		 * properly aligns the dirty field, so no forcible
3555		 * alignment is needed. Only require existence of
3556		 * atomic_clear_64 when page size is 32768.
3557		 */
3558		addr = (uintptr_t)&m->dirty;
3559#if PAGE_SIZE == 32768
3560		atomic_clear_64((uint64_t *)addr, pagebits);
3561#elif PAGE_SIZE == 16384
3562		atomic_clear_32((uint32_t *)addr, pagebits);
3563#else		/* PAGE_SIZE <= 8192 */
3564		/*
3565		 * Use a trick to perform a 32-bit atomic on the
3566		 * containing aligned word, to not depend on the existence
3567		 * of atomic_clear_{8, 16}.
3568		 */
3569		shift = addr & (sizeof(uint32_t) - 1);
3570#if BYTE_ORDER == BIG_ENDIAN
3571		shift = (sizeof(uint32_t) - sizeof(m->dirty) - shift) * NBBY;
3572#else
3573		shift *= NBBY;
3574#endif
3575		addr &= ~(sizeof(uint32_t) - 1);
3576		atomic_clear_32((uint32_t *)addr, pagebits << shift);
3577#endif		/* PAGE_SIZE */
3578	}
3579}
3580
3581/*
3582 *	vm_page_set_validclean:
3583 *
3584 *	Sets portions of a page valid and clean.  The arguments are expected
3585 *	to be DEV_BSIZE aligned but if they aren't the bitmap is inclusive
3586 *	of any partial chunks touched by the range.  The invalid portion of
3587 *	such chunks will be zero'd.
3588 *
3589 *	(base + size) must be less than or equal to PAGE_SIZE.
3590 */
3591void
3592vm_page_set_validclean(vm_page_t m, int base, int size)
3593{
3594	vm_page_bits_t oldvalid, pagebits;
3595	int endoff, frag;
3596
3597	VM_OBJECT_ASSERT_WLOCKED(m->object);
3598	if (size == 0)	/* handle degenerate case */
3599		return;
3600
3601	/*
3602	 * If the base is not DEV_BSIZE aligned and the valid
3603	 * bit is clear, we have to zero out a portion of the
3604	 * first block.
3605	 */
3606	if ((frag = base & ~(DEV_BSIZE - 1)) != base &&
3607	    (m->valid & ((vm_page_bits_t)1 << (base >> DEV_BSHIFT))) == 0)
3608		pmap_zero_page_area(m, frag, base - frag);
3609
3610	/*
3611	 * If the ending offset is not DEV_BSIZE aligned and the
3612	 * valid bit is clear, we have to zero out a portion of
3613	 * the last block.
3614	 */
3615	endoff = base + size;
3616	if ((frag = endoff & ~(DEV_BSIZE - 1)) != endoff &&
3617	    (m->valid & ((vm_page_bits_t)1 << (endoff >> DEV_BSHIFT))) == 0)
3618		pmap_zero_page_area(m, endoff,
3619		    DEV_BSIZE - (endoff & (DEV_BSIZE - 1)));
3620
3621	/*
3622	 * Set valid, clear dirty bits.  If validating the entire
3623	 * page we can safely clear the pmap modify bit.  We also
3624	 * use this opportunity to clear the VPO_NOSYNC flag.  If a process
3625	 * takes a write fault on a MAP_NOSYNC memory area the flag will
3626	 * be set again.
3627	 *
3628	 * We set valid bits inclusive of any overlap, but we can only
3629	 * clear dirty bits for DEV_BSIZE chunks that are fully within
3630	 * the range.
3631	 */
3632	oldvalid = m->valid;
3633	pagebits = vm_page_bits(base, size);
3634	m->valid |= pagebits;
3635#if 0	/* NOT YET */
3636	if ((frag = base & (DEV_BSIZE - 1)) != 0) {
3637		frag = DEV_BSIZE - frag;
3638		base += frag;
3639		size -= frag;
3640		if (size < 0)
3641			size = 0;
3642	}
3643	pagebits = vm_page_bits(base, size & (DEV_BSIZE - 1));
3644#endif
3645	if (base == 0 && size == PAGE_SIZE) {
3646		/*
3647		 * The page can only be modified within the pmap if it is
3648		 * mapped, and it can only be mapped if it was previously
3649		 * fully valid.
3650		 */
3651		if (oldvalid == VM_PAGE_BITS_ALL)
3652			/*
3653			 * Perform the pmap_clear_modify() first.  Otherwise,
3654			 * a concurrent pmap operation, such as
3655			 * pmap_protect(), could clear a modification in the
3656			 * pmap and set the dirty field on the page before
3657			 * pmap_clear_modify() had begun and after the dirty
3658			 * field was cleared here.
3659			 */
3660			pmap_clear_modify(m);
3661		m->dirty = 0;
3662		m->oflags &= ~VPO_NOSYNC;
3663	} else if (oldvalid != VM_PAGE_BITS_ALL)
3664		m->dirty &= ~pagebits;
3665	else
3666		vm_page_clear_dirty_mask(m, pagebits);
3667}
3668
3669void
3670vm_page_clear_dirty(vm_page_t m, int base, int size)
3671{
3672
3673	vm_page_clear_dirty_mask(m, vm_page_bits(base, size));
3674}
3675
3676/*
3677 *	vm_page_set_invalid:
3678 *
3679 *	Invalidates DEV_BSIZE'd chunks within a page.  Both the
3680 *	valid and dirty bits for the affected areas are cleared.
3681 */
3682void
3683vm_page_set_invalid(vm_page_t m, int base, int size)
3684{
3685	vm_page_bits_t bits;
3686	vm_object_t object;
3687
3688	object = m->object;
3689	VM_OBJECT_ASSERT_WLOCKED(object);
3690	if (object->type == OBJT_VNODE && base == 0 && IDX_TO_OFF(m->pindex) +
3691	    size >= object->un_pager.vnp.vnp_size)
3692		bits = VM_PAGE_BITS_ALL;
3693	else
3694		bits = vm_page_bits(base, size);
3695	if (object->ref_count != 0 && m->valid == VM_PAGE_BITS_ALL &&
3696	    bits != 0)
3697		pmap_remove_all(m);
3698	KASSERT((bits == 0 && m->valid == VM_PAGE_BITS_ALL) ||
3699	    !pmap_page_is_mapped(m),
3700	    ("vm_page_set_invalid: page %p is mapped", m));
3701	m->valid &= ~bits;
3702	m->dirty &= ~bits;
3703}
3704
3705/*
3706 * vm_page_zero_invalid()
3707 *
3708 *	The kernel assumes that the invalid portions of a page contain
3709 *	garbage, but such pages can be mapped into memory by user code.
3710 *	When this occurs, we must zero out the non-valid portions of the
3711 *	page so user code sees what it expects.
3712 *
3713 *	Pages are most often semi-valid when the end of a file is mapped
3714 *	into memory and the file's size is not page aligned.
3715 */
3716void
3717vm_page_zero_invalid(vm_page_t m, boolean_t setvalid)
3718{
3719	int b;
3720	int i;
3721
3722	VM_OBJECT_ASSERT_WLOCKED(m->object);
3723	/*
3724	 * Scan the valid bits looking for invalid sections that
3725	 * must be zeroed.  Invalid sub-DEV_BSIZE'd areas (where the
3726	 * valid bit may be set) have already been zeroed by
3727	 * vm_page_set_validclean().
3728	 */
3729	for (b = i = 0; i <= PAGE_SIZE / DEV_BSIZE; ++i) {
3730		if (i == (PAGE_SIZE / DEV_BSIZE) ||
3731		    (m->valid & ((vm_page_bits_t)1 << i))) {
3732			if (i > b) {
3733				pmap_zero_page_area(m,
3734				    b << DEV_BSHIFT, (i - b) << DEV_BSHIFT);
3735			}
3736			b = i + 1;
3737		}
3738	}
3739
3740	/*
3741	 * setvalid is TRUE when we can safely mark the zeroed areas
3742	 * as valid.  We can do this if there are no cache consistency
3743	 * issues, e.g., it is OK to do so with UFS, but not with NFS.
3744	 */
3745	if (setvalid)
3746		m->valid = VM_PAGE_BITS_ALL;
3747}
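
/*
 * Illustrative sketch, not part of the original source: a filesystem
 * with no cache consistency concerns (e.g., UFS) that has read in the
 * valid portion of a partially valid page can zero the remainder and
 * mark the entire page valid in one step:
 *
 *	vm_page_zero_invalid(m, TRUE);
 *
 * An NFS-like filesystem would instead pass FALSE, zeroing the
 * invalid portions without claiming that they hold valid data.
 */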
3748
3749/*
3750 *	vm_page_is_valid:
3751 *
3752 *	Is (partial) page valid?  Note that in the degenerate case
3753 *	where size == 0, this returns FALSE if the page is entirely
3754 *	invalid and TRUE otherwise.
3755 */
3756int
3757vm_page_is_valid(vm_page_t m, int base, int size)
3758{
3759	vm_page_bits_t bits;
3760
3761	VM_OBJECT_ASSERT_LOCKED(m->object);
3762	bits = vm_page_bits(base, size);
3763	return (m->valid != 0 && (m->valid & bits) == bits);
3764}
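
/*
 * Illustrative sketch, not part of the original source: with
 * DEV_BSIZE == 512, if m->valid == 0x03 (only the first two blocks
 * valid), then vm_page_is_valid(m, 0, 1024) returns TRUE,
 * vm_page_is_valid(m, 0, 1536) returns FALSE, and the degenerate
 * vm_page_is_valid(m, 0, 0) returns TRUE because m->valid != 0.
 */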
3765
3766/*
3767 *	vm_page_ps_is_valid:
3768 *
3769 *	Returns TRUE if the entire (super)page is valid and FALSE otherwise.
3770 */
3771boolean_t
3772vm_page_ps_is_valid(vm_page_t m)
3773{
3774	int i, npages;
3775
3776	VM_OBJECT_ASSERT_LOCKED(m->object);
3777	npages = atop(pagesizes[m->psind]);
3778
3779	/*
3780	 * The physically contiguous pages that make up a superpage, i.e., a
3781	 * page with a page size index ("psind") greater than zero, will
3782	 * occupy adjacent entries in vm_page_array[].
3783	 */
3784	for (i = 0; i < npages; i++) {
3785		if (m[i].valid != VM_PAGE_BITS_ALL)
3786			return (FALSE);
3787	}
3788	return (TRUE);
3789}
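
/*
 * Illustrative sketch, not part of the original source: on amd64,
 * where pagesizes[1] is 2MB, a page with psind == 1 heads a run of
 * atop(2MB) == 512 consecutive vm_page_array[] entries, so the loop
 * above inspects all 512 constituent 4KB pages and returns TRUE only
 * if every one of them is fully valid.
 */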
3790
3791/*
3792 * Set the page's dirty bits if the page is modified.
3793 */
3794void
3795vm_page_test_dirty(vm_page_t m)
3796{
3797
3798	VM_OBJECT_ASSERT_WLOCKED(m->object);
3799	if (m->dirty != VM_PAGE_BITS_ALL && pmap_is_modified(m))
3800		vm_page_dirty(m);
3801}
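
/*
 * Illustrative sketch, not part of the original source; the example
 * function below is hypothetical.  A caller deciding whether a page
 * must be written back would first fold any pmap-level modified bit
 * into m->dirty:
 */
#if 0	/* EXAMPLE ONLY */
static boolean_t
vm_page_needs_laundering_example(vm_page_t m)
{

	VM_OBJECT_ASSERT_WLOCKED(m->object);
	vm_page_test_dirty(m);
	return (m->dirty != 0);
}
#endif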
3802
3803void
3804vm_page_lock_KBI(vm_page_t m, const char *file, int line)
3805{
3806
3807	mtx_lock_flags_(vm_page_lockptr(m), 0, file, line);
3808}
3809
3810void
3811vm_page_unlock_KBI(vm_page_t m, const char *file, int line)
3812{
3813
3814	mtx_unlock_flags_(vm_page_lockptr(m), 0, file, line);
3815}
3816
3817int
3818vm_page_trylock_KBI(vm_page_t m, const char *file, int line)
3819{
3820
3821	return (mtx_trylock_flags_(vm_page_lockptr(m), 0, file, line));
3822}
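
/*
 * Illustrative note, not part of the original source: the _KBI
 * variants above back the vm_page_lock(), vm_page_unlock() and
 * vm_page_trylock() macros when those are expanded outside the
 * kernel proper, keeping the page-to-mutex mapping computed by
 * vm_page_lockptr() private and thus stable across the kernel
 * binary interface.  A module simply writes, e.g.:
 *
 *	vm_page_lock(m);
 *	(manipulate the page's queue state, hold count, etc.)
 *	vm_page_unlock(m);
 */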
3823
3824#if defined(INVARIANTS) || defined(INVARIANT_SUPPORT)
3825void
3826vm_page_assert_locked_KBI(vm_page_t m, const char *file, int line)
3827{
3828
3829	vm_page_lock_assert_KBI(m, MA_OWNED, file, line);
3830}
3831
3832void
3833vm_page_lock_assert_KBI(vm_page_t m, int a, const char *file, int line)
3834{
3835
3836	mtx_assert_(vm_page_lockptr(m), a, file, line);
3837}
3838#endif
3839
3840#ifdef INVARIANTS
3841void
3842vm_page_object_lock_assert(vm_page_t m)
3843{
3844
3845	/*
3846	 * Certain of the page's fields may only be modified by the
3847	 * holder of the containing object's lock or the exclusive busy
3848	 * holder.  Unfortunately, the holder of the exclusive busy is
3849	 * not recorded, and thus cannot be checked here.
3850	 */
3851	if (m->object != NULL && !vm_page_xbusied(m))
3852		VM_OBJECT_ASSERT_WLOCKED(m->object);
3853}
3854
3855void
3856vm_page_assert_pga_writeable(vm_page_t m, uint8_t bits)
3857{
3858
3859	if ((bits & PGA_WRITEABLE) == 0)
3860		return;
3861
3862	/*
3863	 * The PGA_WRITEABLE flag can only be set if the page is
3864	 * managed and either exclusively busied or its object is locked.
3865	 * Currently, this flag is only set by pmap_enter().
3866	 */
3867	KASSERT((m->oflags & VPO_UNMANAGED) == 0,
3868	    ("PGA_WRITEABLE on unmanaged page"));
3869	if (!vm_page_xbusied(m))
3870		VM_OBJECT_ASSERT_LOCKED(m->object);
3871}
3872#endif
3873
3874#include "opt_ddb.h"
3875#ifdef DDB
3876#include <sys/kernel.h>
3877
3878#include <ddb/ddb.h>
3879
3880DB_SHOW_COMMAND(page, vm_page_print_page_info)
3881{
3882	db_printf("vm_cnt.v_free_count: %d\n", vm_cnt.v_free_count);
3883	db_printf("vm_cnt.v_cache_count: %d\n", vm_cnt.v_cache_count);
3884	db_printf("vm_cnt.v_inactive_count: %d\n", vm_cnt.v_inactive_count);
3885	db_printf("vm_cnt.v_active_count: %d\n", vm_cnt.v_active_count);
3886	db_printf("vm_cnt.v_wire_count: %d\n", vm_cnt.v_wire_count);
3887	db_printf("vm_cnt.v_free_reserved: %d\n", vm_cnt.v_free_reserved);
3888	db_printf("vm_cnt.v_free_min: %d\n", vm_cnt.v_free_min);
3889	db_printf("vm_cnt.v_free_target: %d\n", vm_cnt.v_free_target);
3890	db_printf("vm_cnt.v_inactive_target: %d\n", vm_cnt.v_inactive_target);
3891}
3892
3893DB_SHOW_COMMAND(pageq, vm_page_print_pageq_info)
3894{
3895	int dom;
3896
3897	db_printf("pq_free %d pq_cache %d\n",
3898	    vm_cnt.v_free_count, vm_cnt.v_cache_count);
3899	for (dom = 0; dom < vm_ndomains; dom++) {
3900		db_printf(
3901	"dom %d page_cnt %d free %d pq_act %d pq_inact %d pass %d\n",
3902		    dom,
3903		    vm_dom[dom].vmd_page_count,
3904		    vm_dom[dom].vmd_free_count,
3905		    vm_dom[dom].vmd_pagequeues[PQ_ACTIVE].pq_cnt,
3906		    vm_dom[dom].vmd_pagequeues[PQ_INACTIVE].pq_cnt,
3907		    vm_dom[dom].vmd_pass);
3908	}
3909}
3910
3911DB_SHOW_COMMAND(pginfo, vm_page_print_pginfo)
3912{
3913	vm_page_t m;
3914	boolean_t phys;
3915
3916	if (!have_addr) {
3917		db_printf("show pginfo addr\n");
3918		return;
3919	}
3920
3921	phys = strchr(modif, 'p') != NULL;
3922	if (phys)
3923		m = PHYS_TO_VM_PAGE(addr);
3924	else
3925		m = (vm_page_t)addr;
3926	db_printf(
3927    "page %p obj %p pidx 0x%jx phys 0x%jx q %d hold %d wire %d\n"
3928    "  af 0x%x of 0x%x f 0x%x act %d busy %x valid 0x%x dirty 0x%x\n",
3929	    m, m->object, (uintmax_t)m->pindex, (uintmax_t)m->phys_addr,
3930	    m->queue, m->hold_count, m->wire_count, m->aflags, m->oflags,
3931	    m->flags, m->act_count, m->busy_lock, m->valid, m->dirty);
3932}
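
/*
 * Illustrative note, not part of the original source: from the ddb
 * prompt these commands are invoked as, e.g.,
 *
 *	db> show page
 *	db> show pageq
 *	db> show pginfo <struct vm_page address>
 *	db> show pginfo/p <physical address>
 *
 * where the "p" modifier makes pginfo interpret the address as a
 * physical address rather than as a struct vm_page pointer.
 */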
3933#endif /* DDB */
3934