vm_page.c revision 248084
1/*-
2 * Copyright (c) 1991 Regents of the University of California.
3 * All rights reserved.
4 * Copyright (c) 1998 Matthew Dillon.  All Rights Reserved.
5 *
6 * This code is derived from software contributed to Berkeley by
7 * The Mach Operating System project at Carnegie-Mellon University.
8 *
9 * Redistribution and use in source and binary forms, with or without
10 * modification, are permitted provided that the following conditions
11 * are met:
12 * 1. Redistributions of source code must retain the above copyright
13 *    notice, this list of conditions and the following disclaimer.
14 * 2. Redistributions in binary form must reproduce the above copyright
15 *    notice, this list of conditions and the following disclaimer in the
16 *    documentation and/or other materials provided with the distribution.
17 * 4. Neither the name of the University nor the names of its contributors
18 *    may be used to endorse or promote products derived from this software
19 *    without specific prior written permission.
20 *
21 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
22 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
23 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
24 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
25 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
26 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
27 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
28 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
29 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
30 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
31 * SUCH DAMAGE.
32 *
33 *	from: @(#)vm_page.c	7.4 (Berkeley) 5/7/91
34 */
35
36/*-
37 * Copyright (c) 1987, 1990 Carnegie-Mellon University.
38 * All rights reserved.
39 *
40 * Authors: Avadis Tevanian, Jr., Michael Wayne Young
41 *
42 * Permission to use, copy, modify and distribute this software and
43 * its documentation is hereby granted, provided that both the copyright
44 * notice and this permission notice appear in all copies of the
45 * software, derivative works or modified versions, and any portions
46 * thereof, and that both notices appear in supporting documentation.
47 *
48 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
49 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND
50 * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
51 *
52 * Carnegie Mellon requests users of this software to return to
53 *
54 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
55 *  School of Computer Science
56 *  Carnegie Mellon University
57 *  Pittsburgh PA 15213-3890
58 *
59 * any improvements or extensions that they make and grant Carnegie the
60 * rights to redistribute these changes.
61 */
62
63/*
64 *			GENERAL RULES ON VM_PAGE MANIPULATION
65 *
66 *	- A page queue lock is required when adding or removing a page from a
67 *	  page queue (vm_pagequeues[]), regardless of other locks or the
68 *	  busy state of a page.
69 *
70 *		* In general, no thread besides the page daemon can acquire or
71 *		  hold more than one page queue lock at a time.
72 *
73 *		* The page daemon can acquire and hold any pair of page queue
74 *		  locks in any order.
75 *
76 *	- The object mutex is held when inserting or removing
77 *	  pages from an object (vm_page_insert() or vm_page_remove()).
78 *
79 */
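/*
 * Example (illustrative sketch only): the rules above in practice for a
 * managed page "m" belonging to a write-locked object.  The page queue lock
 * itself is taken inside vm_page_activate()/vm_page_enqueue(); the caller
 * holds only the object and page locks.  The function name is hypothetical.
 */
#if 0
static void
vm_page_example_activate(vm_object_t object, vm_page_t m)
{

	VM_OBJECT_ASSERT_WLOCKED(object);
	vm_page_lock(m);
	vm_page_activate(m);		/* acquires the PQ_ACTIVE queue lock */
	vm_page_unlock(m);
}
#endif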
80
81/*
82 *	Resident memory management module.
83 */
84
85#include <sys/cdefs.h>
86__FBSDID("$FreeBSD: head/sys/vm/vm_page.c 248084 2013-03-09 02:32:23Z attilio $");
87
88#include "opt_vm.h"
89
90#include <sys/param.h>
91#include <sys/systm.h>
92#include <sys/lock.h>
93#include <sys/kernel.h>
94#include <sys/limits.h>
95#include <sys/malloc.h>
96#include <sys/msgbuf.h>
97#include <sys/mutex.h>
98#include <sys/proc.h>
99#include <sys/rwlock.h>
100#include <sys/sysctl.h>
101#include <sys/vmmeter.h>
102#include <sys/vnode.h>
103
104#include <vm/vm.h>
105#include <vm/pmap.h>
106#include <vm/vm_param.h>
107#include <vm/vm_kern.h>
108#include <vm/vm_object.h>
109#include <vm/vm_page.h>
110#include <vm/vm_pageout.h>
111#include <vm/vm_pager.h>
112#include <vm/vm_phys.h>
113#include <vm/vm_reserv.h>
114#include <vm/vm_extern.h>
115#include <vm/uma.h>
116#include <vm/uma_int.h>
117
118#include <machine/md_var.h>
119
120/*
121 *	Associated with each page of user-allocatable memory is a
122 *	page structure.
123 */
124
125struct vm_pagequeue vm_pagequeues[PQ_COUNT] = {
126	[PQ_INACTIVE] = {
127		.pq_pl = TAILQ_HEAD_INITIALIZER(
128		    vm_pagequeues[PQ_INACTIVE].pq_pl),
129		.pq_cnt = &cnt.v_inactive_count,
130		.pq_name = "vm inactive pagequeue"
131	},
132	[PQ_ACTIVE] = {
133		.pq_pl = TAILQ_HEAD_INITIALIZER(
134		    vm_pagequeues[PQ_ACTIVE].pq_pl),
135		.pq_cnt = &cnt.v_active_count,
136		.pq_name = "vm active pagequeue"
137	}
138};
139struct mtx_padalign vm_page_queue_free_mtx;
140
141struct mtx_padalign pa_lock[PA_LOCK_COUNT];
142
143vm_page_t vm_page_array;
144long vm_page_array_size;
145long first_page;
146int vm_page_zero_count;
147
148static int boot_pages = UMA_BOOT_PAGES;
149TUNABLE_INT("vm.boot_pages", &boot_pages);
150SYSCTL_INT(_vm, OID_AUTO, boot_pages, CTLFLAG_RD, &boot_pages, 0,
151	"number of pages allocated for bootstrapping the VM system");
152
153static int pa_tryrelock_restart;
154SYSCTL_INT(_vm, OID_AUTO, tryrelock_restart, CTLFLAG_RD,
155    &pa_tryrelock_restart, 0, "Number of tryrelock restarts");
156
157static uma_zone_t fakepg_zone;
158
159static struct vnode *vm_page_alloc_init(vm_page_t m);
160static void vm_page_clear_dirty_mask(vm_page_t m, vm_page_bits_t pagebits);
161static void vm_page_enqueue(int queue, vm_page_t m);
162static void vm_page_init_fakepg(void *dummy);
163
164SYSINIT(vm_page, SI_SUB_VM, SI_ORDER_SECOND, vm_page_init_fakepg, NULL);
165
166static void
167vm_page_init_fakepg(void *dummy)
168{
169
170	fakepg_zone = uma_zcreate("fakepg", sizeof(struct vm_page), NULL, NULL,
171	    NULL, NULL, UMA_ALIGN_PTR, UMA_ZONE_NOFREE | UMA_ZONE_VM);
172}
173
174/* Make sure that u_long is at least 64 bits when PAGE_SIZE is 32K. */
175#if PAGE_SIZE == 32768
176#ifdef CTASSERT
177CTASSERT(sizeof(u_long) >= 8);
178#endif
179#endif
180
181/*
182 * Try to acquire a physical address lock while a pmap is locked.  If the
183 * trylock fails, drop the pmap lock, take the pa lock the hard way, and
184 * relock the pmap, caching the newly locked pa in *locked.  The caller
185 * should then restart its loop since the mapping may have changed.
186 */
187int
188vm_page_pa_tryrelock(pmap_t pmap, vm_paddr_t pa, vm_paddr_t *locked)
189{
190	vm_paddr_t lockpa;
191
192	lockpa = *locked;
193	*locked = pa;
194	if (lockpa) {
195		PA_LOCK_ASSERT(lockpa, MA_OWNED);
196		if (PA_LOCKPTR(pa) == PA_LOCKPTR(lockpa))
197			return (0);
198		PA_UNLOCK(lockpa);
199	}
200	if (PA_TRYLOCK(pa))
201		return (0);
202	PMAP_UNLOCK(pmap);
203	atomic_add_int(&pa_tryrelock_restart, 1);
204	PA_LOCK(pa);
205	PMAP_LOCK(pmap);
206	return (EAGAIN);
207}
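/*
 * Example (illustrative sketch only): the caller-side retry pattern for
 * vm_page_pa_tryrelock().  "example_pte_pa()" is a hypothetical helper that
 * re-reads the PTE's physical address under the pmap lock; it stands in for
 * whatever the pmap implementation actually does.
 */
#if 0
	vm_paddr_t locked_pa, pa;

	locked_pa = 0;
	PMAP_LOCK(pmap);
retry:
	pa = example_pte_pa(pmap, va);	/* hypothetical helper */
	if (vm_page_pa_tryrelock(pmap, pa, &locked_pa))
		goto retry;		/* the VA->PA mapping may have changed */
	/* Both the pmap lock and the page lock for "pa" are held here. */
	if (locked_pa != 0)
		PA_UNLOCK(locked_pa);
	PMAP_UNLOCK(pmap);
#endif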
208
209/*
210 *	vm_set_page_size:
211 *
212 *	Sets the page size, perhaps based upon the memory
213 *	size.  Must be called before any use of page-size
214 *	dependent functions.
215 */
216void
217vm_set_page_size(void)
218{
219	if (cnt.v_page_size == 0)
220		cnt.v_page_size = PAGE_SIZE;
221	if (((cnt.v_page_size - 1) & cnt.v_page_size) != 0)
222		panic("vm_set_page_size: page size not a power of two");
223}
224
225/*
226 *	vm_page_blacklist_lookup:
227 *
228 *	See if a physical address in this page has been listed
229 *	in the blacklist tunable.  Entries in the tunable are
230 *	separated by spaces or commas.  If an invalid integer is
231 *	encountered then the rest of the string is skipped.
232 */
233static int
234vm_page_blacklist_lookup(char *list, vm_paddr_t pa)
235{
236	vm_paddr_t bad;
237	char *cp, *pos;
238
239	for (pos = list; *pos != '\0'; pos = cp) {
240		bad = strtoq(pos, &cp, 0);
241		if (*cp != '\0') {
242			if (*cp == ' ' || *cp == ',') {
243				cp++;
244				if (cp == pos)
245					continue;
246			} else
247				break;
248		}
249		if (pa == trunc_page(bad))
250			return (1);
251	}
252	return (0);
253}
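/*
 * Example (illustrative sketch only): the blacklist string comes from the
 * "vm.blacklist" kernel environment variable (read in vm_page_startup()
 * below), which can be set from loader.conf, e.g.:
 *
 *	vm.blacklist="0x7f2d3000,0x7f2d4000"
 *
 * The addresses shown are arbitrary; each entry is truncated to a page
 * boundary before being compared against the page's physical address.
 */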
254
255/*
256 *	vm_page_startup:
257 *
258 *	Initializes the resident memory module.
259 *
260 *	Allocates memory for the page cells, and
261 *	for the object/offset-to-page hash table headers.
262 *	Each page cell is initialized and placed on the free list.
263 */
264vm_offset_t
265vm_page_startup(vm_offset_t vaddr)
266{
267	vm_offset_t mapped;
268	vm_paddr_t page_range;
269	vm_paddr_t new_end;
270	int i;
271	vm_paddr_t pa;
272	vm_paddr_t last_pa;
273	char *list;
274
275	/* the biggest memory array is the second group of pages */
276	vm_paddr_t end;
277	vm_paddr_t biggestsize;
278	vm_paddr_t low_water, high_water;
279	int biggestone;
280
281	biggestsize = 0;
282	biggestone = 0;
283	vaddr = round_page(vaddr);
284
285	for (i = 0; phys_avail[i + 1]; i += 2) {
286		phys_avail[i] = round_page(phys_avail[i]);
287		phys_avail[i + 1] = trunc_page(phys_avail[i + 1]);
288	}
289
290	low_water = phys_avail[0];
291	high_water = phys_avail[1];
292
293	for (i = 0; phys_avail[i + 1]; i += 2) {
294		vm_paddr_t size = phys_avail[i + 1] - phys_avail[i];
295
296		if (size > biggestsize) {
297			biggestone = i;
298			biggestsize = size;
299		}
300		if (phys_avail[i] < low_water)
301			low_water = phys_avail[i];
302		if (phys_avail[i + 1] > high_water)
303			high_water = phys_avail[i + 1];
304	}
305
306#ifdef XEN
307	low_water = 0;
308#endif
309
310	end = phys_avail[biggestone+1];
311
312	/*
313	 * Initialize the page and queue locks.
314	 */
315	mtx_init(&vm_page_queue_free_mtx, "vm page free queue", NULL, MTX_DEF);
316	for (i = 0; i < PA_LOCK_COUNT; i++)
317		mtx_init(&pa_lock[i], "vm page", NULL, MTX_DEF);
318	for (i = 0; i < PQ_COUNT; i++)
319		vm_pagequeue_init_lock(&vm_pagequeues[i]);
320
321	/*
322	 * Allocate memory for use when bootstrapping the kernel memory
323	 * allocator.
324	 */
325	new_end = end - (boot_pages * UMA_SLAB_SIZE);
326	new_end = trunc_page(new_end);
327	mapped = pmap_map(&vaddr, new_end, end,
328	    VM_PROT_READ | VM_PROT_WRITE);
329	bzero((void *)mapped, end - new_end);
330	uma_startup((void *)mapped, boot_pages);
331
332#if defined(__amd64__) || defined(__i386__) || defined(__arm__) || \
333    defined(__mips__)
334	/*
335	 * Allocate a bitmap recording which physical pages need to be
336	 * included in a minidump.
337	 *
338	 * The amd64 port needs this to indicate which direct map pages
339	 * need to be dumped, via calls to dump_add_page()/dump_drop_page().
340	 *
341	 * However, i386 still needs this workspace internally within the
342	 * minidump code.  In theory, the bitmap is not needed on i386, but
343	 * it is included should the sf_buf code decide to use it.
344	 */
345	last_pa = 0;
346	for (i = 0; dump_avail[i + 1] != 0; i += 2)
347		if (dump_avail[i + 1] > last_pa)
348			last_pa = dump_avail[i + 1];
349	page_range = last_pa / PAGE_SIZE;
350	vm_page_dump_size = round_page(roundup2(page_range, NBBY) / NBBY);
351	new_end -= vm_page_dump_size;
352	vm_page_dump = (void *)(uintptr_t)pmap_map(&vaddr, new_end,
353	    new_end + vm_page_dump_size, VM_PROT_READ | VM_PROT_WRITE);
354	bzero((void *)vm_page_dump, vm_page_dump_size);
355#endif
356#ifdef __amd64__
357	/*
358	 * Request that the physical pages underlying the message buffer be
359	 * included in a crash dump.  Since the message buffer is accessed
360	 * through the direct map, its pages are not automatically included.
361	 */
362	pa = DMAP_TO_PHYS((vm_offset_t)msgbufp->msg_ptr);
363	last_pa = pa + round_page(msgbufsize);
364	while (pa < last_pa) {
365		dump_add_page(pa);
366		pa += PAGE_SIZE;
367	}
368#endif
369	/*
370	 * Compute the number of pages of memory that will be available for
371	 * use (taking into account the overhead of a page structure per
372	 * page).
373	 */
374	first_page = low_water / PAGE_SIZE;
375#ifdef VM_PHYSSEG_SPARSE
376	page_range = 0;
377	for (i = 0; phys_avail[i + 1] != 0; i += 2)
378		page_range += atop(phys_avail[i + 1] - phys_avail[i]);
379#elif defined(VM_PHYSSEG_DENSE)
380	page_range = high_water / PAGE_SIZE - first_page;
381#else
382#error "Either VM_PHYSSEG_DENSE or VM_PHYSSEG_SPARSE must be defined."
383#endif
384	end = new_end;
385
386	/*
387	 * Reserve an unmapped guard page to trap access to vm_page_array[-1].
388	 */
389	vaddr += PAGE_SIZE;
390
391	/*
392	 * Initialize the mem entry structures now, and put them in the free
393	 * queue.
394	 */
395	new_end = trunc_page(end - page_range * sizeof(struct vm_page));
396	mapped = pmap_map(&vaddr, new_end, end,
397	    VM_PROT_READ | VM_PROT_WRITE);
398	vm_page_array = (vm_page_t) mapped;
399#if VM_NRESERVLEVEL > 0
400	/*
401	 * Allocate memory for the reservation management system's data
402	 * structures.
403	 */
404	new_end = vm_reserv_startup(&vaddr, new_end, high_water);
405#endif
406#if defined(__amd64__) || defined(__mips__)
407	/*
408	 * pmap_map on amd64 and mips can come out of the direct-map, not kvm
409	 * like i386, so the pages must be tracked for a crashdump to include
410	 * this data.  This includes the vm_page_array and the early UMA
411	 * bootstrap pages.
412	 */
413	for (pa = new_end; pa < phys_avail[biggestone + 1]; pa += PAGE_SIZE)
414		dump_add_page(pa);
415#endif
416	phys_avail[biggestone + 1] = new_end;
417
418	/*
419	 * Clear all of the page structures
420	 */
421	bzero((caddr_t) vm_page_array, page_range * sizeof(struct vm_page));
422	for (i = 0; i < page_range; i++)
423		vm_page_array[i].order = VM_NFREEORDER;
424	vm_page_array_size = page_range;
425
426	/*
427	 * Initialize the physical memory allocator.
428	 */
429	vm_phys_init();
430
431	/*
432	 * Add every available physical page that is not blacklisted to
433	 * the free lists.
434	 */
435	cnt.v_page_count = 0;
436	cnt.v_free_count = 0;
437	list = getenv("vm.blacklist");
438	for (i = 0; phys_avail[i + 1] != 0; i += 2) {
439		pa = phys_avail[i];
440		last_pa = phys_avail[i + 1];
441		while (pa < last_pa) {
442			if (list != NULL &&
443			    vm_page_blacklist_lookup(list, pa))
444				printf("Skipping page with pa 0x%jx\n",
445				    (uintmax_t)pa);
446			else
447				vm_phys_add_page(pa);
448			pa += PAGE_SIZE;
449		}
450	}
451	freeenv(list);
452#if VM_NRESERVLEVEL > 0
453	/*
454	 * Initialize the reservation management system.
455	 */
456	vm_reserv_init();
457#endif
458	return (vaddr);
459}
460
461void
462vm_page_reference(vm_page_t m)
463{
464
465	vm_page_aflag_set(m, PGA_REFERENCED);
466}
467
468void
469vm_page_busy(vm_page_t m)
470{
471
472	VM_OBJECT_ASSERT_WLOCKED(m->object);
473	KASSERT((m->oflags & VPO_BUSY) == 0,
474	    ("vm_page_busy: page already busy!!!"));
475	m->oflags |= VPO_BUSY;
476}
477
478/*
479 *      vm_page_flash:
480 *
481 *      wake up anyone waiting for the page.
482 */
483void
484vm_page_flash(vm_page_t m)
485{
486
487	VM_OBJECT_ASSERT_WLOCKED(m->object);
488	if (m->oflags & VPO_WANTED) {
489		m->oflags &= ~VPO_WANTED;
490		wakeup(m);
491	}
492}
493
494/*
495 *      vm_page_wakeup:
496 *
497 *      clear the VPO_BUSY flag and wake up anyone waiting for the
498 *      page.
499 *
500 */
501void
502vm_page_wakeup(vm_page_t m)
503{
504
505	VM_OBJECT_ASSERT_WLOCKED(m->object);
506	KASSERT(m->oflags & VPO_BUSY, ("vm_page_wakeup: page not busy!!!"));
507	m->oflags &= ~VPO_BUSY;
508	vm_page_flash(m);
509}
510
511void
512vm_page_io_start(vm_page_t m)
513{
514
515	VM_OBJECT_ASSERT_WLOCKED(m->object);
516	m->busy++;
517}
518
519void
520vm_page_io_finish(vm_page_t m)
521{
522
523	VM_OBJECT_ASSERT_WLOCKED(m->object);
524	KASSERT(m->busy > 0, ("vm_page_io_finish: page %p is not busy", m));
525	m->busy--;
526	if (m->busy == 0)
527		vm_page_flash(m);
528}
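/*
 * Example (illustrative sketch only): how a pager might bracket an I/O with
 * the busy count, assuming the object is write-locked around the calls.  The
 * function name is hypothetical.
 */
#if 0
static void
vm_page_example_io(vm_object_t object, vm_page_t m)
{

	VM_OBJECT_WLOCK(object);
	vm_page_io_start(m);		/* record an I/O in flight on the page */
	VM_OBJECT_WUNLOCK(object);

	/* ... issue and wait for the I/O without the object lock ... */

	VM_OBJECT_WLOCK(object);
	vm_page_io_finish(m);		/* drop the count, wake VPO_WANTED sleepers */
	VM_OBJECT_WUNLOCK(object);
}
#endif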
529
530/*
531 * Keep a page from being freed by the page daemon.  Holding a page has
532 * much the same effect as wiring it, except that the overhead is much
533 * lower; it should be used only for *very* temporary holding
534 * ("wiring").
535 */
536void
537vm_page_hold(vm_page_t mem)
538{
539
540	vm_page_lock_assert(mem, MA_OWNED);
541	mem->hold_count++;
542}
543
544void
545vm_page_unhold(vm_page_t mem)
546{
547
548	vm_page_lock_assert(mem, MA_OWNED);
549	--mem->hold_count;
550	KASSERT(mem->hold_count >= 0, ("vm_page_unhold: hold count < 0!!!"));
551	if (mem->hold_count == 0 && (mem->flags & PG_UNHOLDFREE) != 0)
552		vm_page_free_toq(mem);
553}
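/*
 * Example (illustrative sketch only): a transient hold across a window where
 * the page must not be freed, such as a copyout of its contents.  The
 * function name is hypothetical.
 */
#if 0
static void
vm_page_example_hold(vm_page_t m)
{

	vm_page_lock(m);
	vm_page_hold(m);		/* keep the page from being freed */
	vm_page_unlock(m);

	/* ... short window that uses the page without the page lock ... */

	vm_page_lock(m);
	vm_page_unhold(m);		/* may free the page if PG_UNHOLDFREE is set */
	vm_page_unlock(m);
}
#endif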
554
555/*
556 *	vm_page_unhold_pages:
557 *
558 *	Unhold each of the pages that is referenced by the given array.
559 */
560void
561vm_page_unhold_pages(vm_page_t *ma, int count)
562{
563	struct mtx *mtx, *new_mtx;
564
565	mtx = NULL;
566	for (; count != 0; count--) {
567		/*
568		 * Avoid releasing and reacquiring the same page lock.
569		 */
570		new_mtx = vm_page_lockptr(*ma);
571		if (mtx != new_mtx) {
572			if (mtx != NULL)
573				mtx_unlock(mtx);
574			mtx = new_mtx;
575			mtx_lock(mtx);
576		}
577		vm_page_unhold(*ma);
578		ma++;
579	}
580	if (mtx != NULL)
581		mtx_unlock(mtx);
582}
583
584vm_page_t
585PHYS_TO_VM_PAGE(vm_paddr_t pa)
586{
587	vm_page_t m;
588
589#ifdef VM_PHYSSEG_SPARSE
590	m = vm_phys_paddr_to_vm_page(pa);
591	if (m == NULL)
592		m = vm_phys_fictitious_to_vm_page(pa);
593	return (m);
594#elif defined(VM_PHYSSEG_DENSE)
595	long pi;
596
597	pi = atop(pa);
598	if (pi >= first_page && (pi - first_page) < vm_page_array_size) {
599		m = &vm_page_array[pi - first_page];
600		return (m);
601	}
602	return (vm_phys_fictitious_to_vm_page(pa));
603#else
604#error "Either VM_PHYSSEG_DENSE or VM_PHYSSEG_SPARSE must be defined."
605#endif
606}
607
608/*
609 *	vm_page_getfake:
610 *
611 *	Create a fictitious page with the specified physical address and
612 *	memory attribute.  The memory attribute is the only machine-
613 *	dependent aspect of a fictitious page that must be initialized.
614 */
615vm_page_t
616vm_page_getfake(vm_paddr_t paddr, vm_memattr_t memattr)
617{
618	vm_page_t m;
619
620	m = uma_zalloc(fakepg_zone, M_WAITOK | M_ZERO);
621	vm_page_initfake(m, paddr, memattr);
622	return (m);
623}
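/*
 * Example (illustrative sketch only): a device-pager-style consumer wrapping
 * a device physical address in a fictitious page.  The function name and
 * memory attribute value are assumptions chosen for illustration.
 */
#if 0
static void
vm_page_example_fake(vm_paddr_t devaddr)
{
	vm_page_t m;

	m = vm_page_getfake(devaddr, VM_MEMATTR_UNCACHEABLE);
	/* ... map or hand out the fictitious page ... */
	vm_page_putfake(m);
}
#endif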
624
625void
626vm_page_initfake(vm_page_t m, vm_paddr_t paddr, vm_memattr_t memattr)
627{
628
629	if ((m->flags & PG_FICTITIOUS) != 0) {
630		/*
631		 * The page's memattr might have changed since the
632		 * previous initialization.  Update the pmap to the
633		 * new memattr.
634		 */
635		goto memattr;
636	}
637	m->phys_addr = paddr;
638	m->queue = PQ_NONE;
639	/* Fictitious pages don't use "segind". */
640	m->flags = PG_FICTITIOUS;
641	/* Fictitious pages don't use "order" or "pool". */
642	m->oflags = VPO_BUSY | VPO_UNMANAGED;
643	m->wire_count = 1;
644memattr:
645	pmap_page_set_memattr(m, memattr);
646}
647
648/*
649 *	vm_page_putfake:
650 *
651 *	Release a fictitious page.
652 */
653void
654vm_page_putfake(vm_page_t m)
655{
656
657	KASSERT((m->oflags & VPO_UNMANAGED) != 0, ("managed %p", m));
658	KASSERT((m->flags & PG_FICTITIOUS) != 0,
659	    ("vm_page_putfake: bad page %p", m));
660	uma_zfree(fakepg_zone, m);
661}
662
663/*
664 *	vm_page_updatefake:
665 *
666 *	Update the given fictitious page to the specified physical address and
667 *	memory attribute.
668 */
669void
670vm_page_updatefake(vm_page_t m, vm_paddr_t paddr, vm_memattr_t memattr)
671{
672
673	KASSERT((m->flags & PG_FICTITIOUS) != 0,
674	    ("vm_page_updatefake: bad page %p", m));
675	m->phys_addr = paddr;
676	pmap_page_set_memattr(m, memattr);
677}
678
679/*
680 *	vm_page_free:
681 *
682 *	Free a page.
683 */
684void
685vm_page_free(vm_page_t m)
686{
687
688	m->flags &= ~PG_ZERO;
689	vm_page_free_toq(m);
690}
691
692/*
693 *	vm_page_free_zero:
694 *
695 *	Free a page to the zeroed-pages queue
696 */
697void
698vm_page_free_zero(vm_page_t m)
699{
700
701	m->flags |= PG_ZERO;
702	vm_page_free_toq(m);
703}
704
705/*
706 * Unbusy and handle the page queueing for a page from the VOP_GETPAGES()
707 * array that is not the requested page.
708 */
709void
710vm_page_readahead_finish(vm_page_t m)
711{
712
713	if (m->valid != 0) {
714		/*
715		 * Since the page is not the requested page, whether
716		 * it should be activated or deactivated is not
717		 * obvious.  Empirical results have shown that
718		 * deactivating the page is usually the best choice,
719		 * unless the page is wanted by another thread.
720		 */
721		if (m->oflags & VPO_WANTED) {
722			vm_page_lock(m);
723			vm_page_activate(m);
724			vm_page_unlock(m);
725		} else {
726			vm_page_lock(m);
727			vm_page_deactivate(m);
728			vm_page_unlock(m);
729		}
730		vm_page_wakeup(m);
731	} else {
732		/*
733		 * Free the completely invalid page.  Such a page state
734		 * occurs due to a short read operation that did not
735		 * cover our page at all, or when a read error
736		 * happens.
737		 */
738		vm_page_lock(m);
739		vm_page_free(m);
740		vm_page_unlock(m);
741	}
742}
743
744/*
745 *	vm_page_sleep:
746 *
747 *	Sleep and release the page lock.
748 *
749 *	The object containing the given page must be locked.
750 */
751void
752vm_page_sleep(vm_page_t m, const char *msg)
753{
754
755	VM_OBJECT_ASSERT_WLOCKED(m->object);
756	if (mtx_owned(vm_page_lockptr(m)))
757		vm_page_unlock(m);
758
759	/*
760	 * It's possible that while we sleep, the page will get
761	 * unbusied and freed.  If we are holding the object
762	 * lock, we will assume we hold a reference to the object
763	 * such that even if m->object changes, we can re-lock
764	 * it.
765	 */
766	m->oflags |= VPO_WANTED;
767	VM_OBJECT_SLEEP(m->object, m, PVM, msg, 0);
768}
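/*
 * Example (illustrative sketch only): the usual lookup-and-sleep loop for
 * waiting on a busy page.  The page must be looked up again after sleeping
 * because it may have been freed or replaced.  The function and wait-message
 * names are hypothetical.
 */
#if 0
static vm_page_t
vm_page_example_wait_unbusy(vm_object_t object, vm_pindex_t pindex)
{
	vm_page_t m;

	VM_OBJECT_ASSERT_WLOCKED(object);
	while ((m = vm_page_lookup(object, pindex)) != NULL &&
	    (m->oflags & VPO_BUSY) != 0)
		vm_page_sleep(m, "pgwbsy");
	return (m);
}
#endif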
769
770/*
771 *	vm_page_dirty_KBI:		[ internal use only ]
772 *
773 *	Set all bits in the page's dirty field.
774 *
775 *	The object containing the specified page must be locked if the
776 *	call is made from the machine-independent layer.
777 *
778 *	See vm_page_clear_dirty_mask().
779 *
780 *	This function should only be called by vm_page_dirty().
781 */
782void
783vm_page_dirty_KBI(vm_page_t m)
784{
785
786	/* These assertions refer to this operation by its public name. */
787	KASSERT((m->flags & PG_CACHED) == 0,
788	    ("vm_page_dirty: page in cache!"));
789	KASSERT(!VM_PAGE_IS_FREE(m),
790	    ("vm_page_dirty: page is free!"));
791	KASSERT(m->valid == VM_PAGE_BITS_ALL,
792	    ("vm_page_dirty: page is invalid!"));
793	m->dirty = VM_PAGE_BITS_ALL;
794}
795
796/*
797 *	vm_page_splay:
798 *
799 *	Implements Sleator and Tarjan's top-down splay algorithm.  Returns
800 *	the vm_page containing the given pindex.  If, however, that
801 *	pindex is not found in the vm_object, returns a vm_page that is
802 *	adjacent to the pindex, coming before or after it.
803 */
804vm_page_t
805vm_page_splay(vm_pindex_t pindex, vm_page_t root)
806{
807	struct vm_page dummy;
808	vm_page_t lefttreemax, righttreemin, y;
809
810	if (root == NULL)
811		return (root);
812	lefttreemax = righttreemin = &dummy;
813	for (;; root = y) {
814		if (pindex < root->pindex) {
815			if ((y = root->left) == NULL)
816				break;
817			if (pindex < y->pindex) {
818				/* Rotate right. */
819				root->left = y->right;
820				y->right = root;
821				root = y;
822				if ((y = root->left) == NULL)
823					break;
824			}
825			/* Link into the new root's right tree. */
826			righttreemin->left = root;
827			righttreemin = root;
828		} else if (pindex > root->pindex) {
829			if ((y = root->right) == NULL)
830				break;
831			if (pindex > y->pindex) {
832				/* Rotate left. */
833				root->right = y->left;
834				y->left = root;
835				root = y;
836				if ((y = root->right) == NULL)
837					break;
838			}
839			/* Link into the new root's left tree. */
840			lefttreemax->right = root;
841			lefttreemax = root;
842		} else
843			break;
844	}
845	/* Assemble the new root. */
846	lefttreemax->right = root->left;
847	righttreemin->left = root->right;
848	root->left = dummy.right;
849	root->right = dummy.left;
850	return (root);
851}
852
853/*
854 *	vm_page_insert:		[ internal use only ]
855 *
856 *	Inserts the given mem entry into the object and object list.
857 *
858 *	The page tables are not updated here; the page will presumably be
859 *	faulted in if necessary, or, for a kernel page, the caller will at
860 *	some point enter the page into the kernel's pmap.  We are not
861 *	allowed to sleep here, so we *can't* do this anyway.
862 *
863 *	The object must be locked.
864 */
865void
866vm_page_insert(vm_page_t m, vm_object_t object, vm_pindex_t pindex)
867{
868	vm_page_t root;
869
870	VM_OBJECT_ASSERT_WLOCKED(object);
871	if (m->object != NULL)
872		panic("vm_page_insert: page already inserted");
873
874	/*
875	 * Record the object/offset pair in this page
876	 */
877	m->object = object;
878	m->pindex = pindex;
879
880	/*
881	 * Now link into the object's ordered list of backed pages.
882	 */
883	root = object->root;
884	if (root == NULL) {
885		m->left = NULL;
886		m->right = NULL;
887		TAILQ_INSERT_TAIL(&object->memq, m, listq);
888	} else {
889		root = vm_page_splay(pindex, root);
890		if (pindex < root->pindex) {
891			m->left = root->left;
892			m->right = root;
893			root->left = NULL;
894			TAILQ_INSERT_BEFORE(root, m, listq);
895		} else if (pindex == root->pindex)
896			panic("vm_page_insert: offset already allocated");
897		else {
898			m->right = root->right;
899			m->left = root;
900			root->right = NULL;
901			TAILQ_INSERT_AFTER(&object->memq, root, m, listq);
902		}
903	}
904	object->root = m;
905
906	/*
907	 * Show that the object has one more resident page.
908	 */
909	object->resident_page_count++;
910
911	/*
912	 * Hold the vnode until the last page is released.
913	 */
914	if (object->resident_page_count == 1 && object->type == OBJT_VNODE)
915		vhold(object->handle);
916
917	/*
918	 * Since we are inserting a new and possibly dirty page,
919	 * update the object's OBJ_MIGHTBEDIRTY flag.
920	 */
921	if (pmap_page_is_write_mapped(m))
922		vm_object_set_writeable_dirty(object);
923}
924
925/*
926 *	vm_page_remove:
927 *
928 *	Removes the given mem entry from the object/offset-page
929 *	table and the object page list, but do not invalidate/terminate
930 *	the backing store.
931 *
932 *	The underlying pmap entry (if any) is NOT removed here.
933 *
934 *	The object must be locked.  The page must be locked if it is managed.
935 */
936void
937vm_page_remove(vm_page_t m)
938{
939	vm_object_t object;
940	vm_page_t next, prev, root;
941
942	if ((m->oflags & VPO_UNMANAGED) == 0)
943		vm_page_lock_assert(m, MA_OWNED);
944	if ((object = m->object) == NULL)
945		return;
946	VM_OBJECT_ASSERT_WLOCKED(object);
947	if (m->oflags & VPO_BUSY) {
948		m->oflags &= ~VPO_BUSY;
949		vm_page_flash(m);
950	}
951
952	/*
953	 * Now remove from the object's list of backed pages.
954	 */
955	if ((next = TAILQ_NEXT(m, listq)) != NULL && next->left == m) {
956		/*
957		 * Since the page's successor in the list is also its parent
958		 * in the tree, its right subtree must be empty.
959		 */
960		next->left = m->left;
961		KASSERT(m->right == NULL,
962		    ("vm_page_remove: page %p has right child", m));
963	} else if ((prev = TAILQ_PREV(m, pglist, listq)) != NULL &&
964	    prev->right == m) {
965		/*
966		 * Since the page's predecessor in the list is also its parent
967		 * in the tree, its left subtree must be empty.
968		 */
969		KASSERT(m->left == NULL,
970		    ("vm_page_remove: page %p has left child", m));
971		prev->right = m->right;
972	} else {
973		if (m != object->root)
974			vm_page_splay(m->pindex, object->root);
975		if (m->left == NULL)
976			root = m->right;
977		else if (m->right == NULL)
978			root = m->left;
979		else {
980			/*
981			 * Move the page's successor to the root, because
982			 * pages are usually removed in ascending order.
983			 */
984			if (m->right != next)
985				vm_page_splay(m->pindex, m->right);
986			next->left = m->left;
987			root = next;
988		}
989		object->root = root;
990	}
991	TAILQ_REMOVE(&object->memq, m, listq);
992
993	/*
994	 * And show that the object has one fewer resident page.
995	 */
996	object->resident_page_count--;
997
998	/*
999	 * The vnode may now be recycled.
1000	 */
1001	if (object->resident_page_count == 0 && object->type == OBJT_VNODE)
1002		vdrop(object->handle);
1003
1004	m->object = NULL;
1005}
1006
1007/*
1008 *	vm_page_lookup:
1009 *
1010 *	Returns the page associated with the object/offset
1011 *	pair specified; if none is found, NULL is returned.
1012 *
1013 *	The object must be locked.
1014 */
1015vm_page_t
1016vm_page_lookup(vm_object_t object, vm_pindex_t pindex)
1017{
1018	vm_page_t m;
1019
1020	VM_OBJECT_ASSERT_WLOCKED(object);
1021	if ((m = object->root) != NULL && m->pindex != pindex) {
1022		m = vm_page_splay(pindex, m);
1023		if ((object->root = m)->pindex != pindex)
1024			m = NULL;
1025	}
1026	return (m);
1027}
1028
1029/*
1030 *	vm_page_find_least:
1031 *
1032 *	Returns the page associated with the object with least pindex
1033 *	greater than or equal to the parameter pindex, or NULL.
1034 *
1035 *	The object must be locked.
1036 */
1037vm_page_t
1038vm_page_find_least(vm_object_t object, vm_pindex_t pindex)
1039{
1040	vm_page_t m;
1041
1042	VM_OBJECT_ASSERT_WLOCKED(object);
1043	if ((m = TAILQ_FIRST(&object->memq)) != NULL) {
1044		if (m->pindex < pindex) {
1045			m = vm_page_splay(pindex, object->root);
1046			if ((object->root = m)->pindex < pindex)
1047				m = TAILQ_NEXT(m, listq);
1048		}
1049	}
1050	return (m);
1051}
1052
1053/*
1054 * Returns the given page's successor (by pindex) within the object if it is
1055 * resident; if none is found, NULL is returned.
1056 *
1057 * The object must be locked.
1058 */
1059vm_page_t
1060vm_page_next(vm_page_t m)
1061{
1062	vm_page_t next;
1063
1064	VM_OBJECT_ASSERT_WLOCKED(m->object);
1065	if ((next = TAILQ_NEXT(m, listq)) != NULL &&
1066	    next->pindex != m->pindex + 1)
1067		next = NULL;
1068	return (next);
1069}
1070
1071/*
1072 * Returns the given page's predecessor (by pindex) within the object if it is
1073 * resident; if none is found, NULL is returned.
1074 *
1075 * The object must be locked.
1076 */
1077vm_page_t
1078vm_page_prev(vm_page_t m)
1079{
1080	vm_page_t prev;
1081
1082	VM_OBJECT_ASSERT_WLOCKED(m->object);
1083	if ((prev = TAILQ_PREV(m, pglist, listq)) != NULL &&
1084	    prev->pindex != m->pindex - 1)
1085		prev = NULL;
1086	return (prev);
1087}
1088
1089/*
1090 *	vm_page_rename:
1091 *
1092 *	Move the given memory entry from its
1093 *	current object to the specified target object/offset.
1094 *
1095 *	Note: swap associated with the page must be invalidated by the move.  We
1096 *	      have to do this for several reasons:  (1) we aren't freeing the
1097 *	      page, (2) we are dirtying the page, (3) the VM system is probably
1098 *	      moving the page from object A to B, and will then later move
1099 *	      the backing store from A to B and we can't have a conflict.
1100 *
1101 *	Note: we *always* dirty the page.  It is necessary both for the
1102 *	      fact that we moved it, and because we may be invalidating
1103 *	      swap.  If the page is in the cache, we have to deactivate it
1104 *	      or vm_page_dirty() will panic.  Dirty pages are not allowed
1105 *	      in the cache.
1106 *
1107 *	The objects must be locked.  The page must be locked if it is managed.
1108 */
1109void
1110vm_page_rename(vm_page_t m, vm_object_t new_object, vm_pindex_t new_pindex)
1111{
1112
1113	vm_page_remove(m);
1114	vm_page_insert(m, new_object, new_pindex);
1115	vm_page_dirty(m);
1116}
1117
1118/*
1119 *	Convert all of the given object's cached pages that have a
1120 *	pindex within the given range into free pages.  If the value
1121 *	zero is given for "end", then the range's upper bound is
1122 *	infinity.  If the given object is backed by a vnode and it
1123 *	transitions from having one or more cached pages to none, the
1124 *	vnode's hold count is reduced.
1125 */
1126void
1127vm_page_cache_free(vm_object_t object, vm_pindex_t start, vm_pindex_t end)
1128{
1129	vm_page_t m, m_next;
1130	boolean_t empty;
1131
1132	mtx_lock(&vm_page_queue_free_mtx);
1133	if (__predict_false(vm_object_cache_is_empty(object))) {
1134		mtx_unlock(&vm_page_queue_free_mtx);
1135		return;
1136	}
1137	m = object->cache = vm_page_splay(start, object->cache);
1138	if (m->pindex < start) {
1139		if (m->right == NULL)
1140			m = NULL;
1141		else {
1142			m_next = vm_page_splay(start, m->right);
1143			m_next->left = m;
1144			m->right = NULL;
1145			m = object->cache = m_next;
1146		}
1147	}
1148
1149	/*
1150	 * At this point, "m" is either (1) a reference to the page
1151	 * with the least pindex that is greater than or equal to
1152	 * "start" or (2) NULL.
1153	 */
1154	for (; m != NULL && (m->pindex < end || end == 0); m = m_next) {
1155		/*
1156		 * Find "m"'s successor and remove "m" from the
1157		 * object's cache.
1158		 */
1159		if (m->right == NULL) {
1160			object->cache = m->left;
1161			m_next = NULL;
1162		} else {
1163			m_next = vm_page_splay(start, m->right);
1164			m_next->left = m->left;
1165			object->cache = m_next;
1166		}
1167		/* Convert "m" to a free page. */
1168		m->object = NULL;
1169		m->valid = 0;
1170		/* Clear PG_CACHED and set PG_FREE. */
1171		m->flags ^= PG_CACHED | PG_FREE;
1172		KASSERT((m->flags & (PG_CACHED | PG_FREE)) == PG_FREE,
1173		    ("vm_page_cache_free: page %p has inconsistent flags", m));
1174		cnt.v_cache_count--;
1175		cnt.v_free_count++;
1176	}
1177	empty = vm_object_cache_is_empty(object);
1178	mtx_unlock(&vm_page_queue_free_mtx);
1179	if (object->type == OBJT_VNODE && empty)
1180		vdrop(object->handle);
1181}
1182
1183/*
1184 *	Returns the cached page that is associated with the given
1185 *	object and offset.  If, however, none exists, returns NULL.
1186 *
1187 *	The free page queue must be locked.
1188 */
1189static inline vm_page_t
1190vm_page_cache_lookup(vm_object_t object, vm_pindex_t pindex)
1191{
1192	vm_page_t m;
1193
1194	mtx_assert(&vm_page_queue_free_mtx, MA_OWNED);
1195	if ((m = object->cache) != NULL && m->pindex != pindex) {
1196		m = vm_page_splay(pindex, m);
1197		if ((object->cache = m)->pindex != pindex)
1198			m = NULL;
1199	}
1200	return (m);
1201}
1202
1203/*
1204 *	Remove the given cached page from its containing object's
1205 *	collection of cached pages.
1206 *
1207 *	The free page queue must be locked.
1208 */
1209static void
1210vm_page_cache_remove(vm_page_t m)
1211{
1212	vm_object_t object;
1213	vm_page_t root;
1214
1215	mtx_assert(&vm_page_queue_free_mtx, MA_OWNED);
1216	KASSERT((m->flags & PG_CACHED) != 0,
1217	    ("vm_page_cache_remove: page %p is not cached", m));
1218	object = m->object;
1219	if (m != object->cache) {
1220		root = vm_page_splay(m->pindex, object->cache);
1221		KASSERT(root == m,
1222		    ("vm_page_cache_remove: page %p is not cached in object %p",
1223		    m, object));
1224	}
1225	if (m->left == NULL)
1226		root = m->right;
1227	else if (m->right == NULL)
1228		root = m->left;
1229	else {
1230		root = vm_page_splay(m->pindex, m->left);
1231		root->right = m->right;
1232	}
1233	object->cache = root;
1234	m->object = NULL;
1235	cnt.v_cache_count--;
1236}
1237
1238/*
1239 *	Transfer all of the cached pages with offset greater than or
1240 *	equal to 'offidxstart' from the original object's cache to the
1241 *	new object's cache.  However, any cached pages with offset
1242 *	greater than or equal to the new object's size are kept in the
1243 *	original object.  Initially, the new object's cache must be
1244 *	empty.  Offset 'offidxstart' in the original object must
1245 *	correspond to offset zero in the new object.
1246 *
1247 *	The new object must be locked.
1248 */
1249void
1250vm_page_cache_transfer(vm_object_t orig_object, vm_pindex_t offidxstart,
1251    vm_object_t new_object)
1252{
1253	vm_page_t m, m_next;
1254
1255	/*
1256	 * Insertion into an object's collection of cached pages
1257	 * requires the object to be locked.  In contrast, removal does
1258	 * not.
1259	 */
1260	VM_OBJECT_ASSERT_WLOCKED(new_object);
1261	KASSERT(vm_object_cache_is_empty(new_object),
1262	    ("vm_page_cache_transfer: object %p has cached pages",
1263	    new_object));
1264	mtx_lock(&vm_page_queue_free_mtx);
1265	if ((m = orig_object->cache) != NULL) {
1266		/*
1267		 * Transfer all of the pages with offset greater than or
1268		 * equal to 'offidxstart' from the original object's
1269		 * cache to the new object's cache.
1270		 */
1271		m = vm_page_splay(offidxstart, m);
1272		if (m->pindex < offidxstart) {
1273			orig_object->cache = m;
1274			new_object->cache = m->right;
1275			m->right = NULL;
1276		} else {
1277			orig_object->cache = m->left;
1278			new_object->cache = m;
1279			m->left = NULL;
1280		}
1281		while ((m = new_object->cache) != NULL) {
1282			if ((m->pindex - offidxstart) >= new_object->size) {
1283				/*
1284				 * Return all of the cached pages with
1285				 * offset greater than or equal to the
1286				 * new object's size to the original
1287				 * object's cache.
1288				 */
1289				new_object->cache = m->left;
1290				m->left = orig_object->cache;
1291				orig_object->cache = m;
1292				break;
1293			}
1294			m_next = vm_page_splay(m->pindex, m->right);
1295			/* Update the page's object and offset. */
1296			m->object = new_object;
1297			m->pindex -= offidxstart;
1298			if (m_next == NULL)
1299				break;
1300			m->right = NULL;
1301			m_next->left = m;
1302			new_object->cache = m_next;
1303		}
1304		KASSERT(vm_object_cache_is_empty(new_object) ||
1305		    new_object->type == OBJT_SWAP,
1306		    ("vm_page_cache_transfer: object %p's type is incompatible"
1307		    " with cached pages", new_object));
1308	}
1309	mtx_unlock(&vm_page_queue_free_mtx);
1310}
1311
1312/*
1313 *	Returns TRUE if a cached page is associated with the given object and
1314 *	offset, and FALSE otherwise.
1315 *
1316 *	The object must be locked.
1317 */
1318boolean_t
1319vm_page_is_cached(vm_object_t object, vm_pindex_t pindex)
1320{
1321	vm_page_t m;
1322
1323	/*
1324	 * Insertion into an object's collection of cached pages requires the
1325	 * object to be locked.  Therefore, if the object is locked and the
1326	 * object's collection is empty, there is no need to acquire the free
1327	 * page queues lock in order to prove that the specified page doesn't
1328	 * exist.
1329	 */
1330	VM_OBJECT_ASSERT_WLOCKED(object);
1331	if (__predict_true(vm_object_cache_is_empty(object)))
1332		return (FALSE);
1333	mtx_lock(&vm_page_queue_free_mtx);
1334	m = vm_page_cache_lookup(object, pindex);
1335	mtx_unlock(&vm_page_queue_free_mtx);
1336	return (m != NULL);
1337}
1338
1339/*
1340 *	vm_page_alloc:
1341 *
1342 *	Allocate and return a page that is associated with the specified
1343 *	object and offset pair.  By default, this page has the flag VPO_BUSY
1344 *	set.
1345 *
1346 *	The caller must always specify an allocation class.
1347 *
1348 *	allocation classes:
1349 *	VM_ALLOC_NORMAL		normal process request
1350 *	VM_ALLOC_SYSTEM		system *really* needs a page
1351 *	VM_ALLOC_INTERRUPT	interrupt time request
1352 *
1353 *	optional allocation flags:
1354 *	VM_ALLOC_COUNT(number)	the number of additional pages that the caller
1355 *				intends to allocate
1356 *	VM_ALLOC_IFCACHED	return page only if it is cached
1357 *	VM_ALLOC_IFNOTCACHED	return NULL, do not reactivate if the page
1358 *				is cached
1359 *	VM_ALLOC_NOBUSY		do not set the flag VPO_BUSY on the page
1360 *	VM_ALLOC_NODUMP		do not include the page in a kernel core dump
1361 *	VM_ALLOC_NOOBJ		page is not associated with an object and
1362 *				should not have the flag VPO_BUSY set
1363 *	VM_ALLOC_WIRED		wire the allocated page
1364 *	VM_ALLOC_ZERO		prefer a zeroed page
1365 *
1366 *	This routine may not sleep.
1367 */
1368vm_page_t
1369vm_page_alloc(vm_object_t object, vm_pindex_t pindex, int req)
1370{
1371	struct vnode *vp = NULL;
1372	vm_object_t m_object;
1373	vm_page_t m;
1374	int flags, req_class;
1375
1376	KASSERT((object != NULL) == ((req & VM_ALLOC_NOOBJ) == 0),
1377	    ("vm_page_alloc: inconsistent object/req"));
1378	if (object != NULL)
1379		VM_OBJECT_ASSERT_WLOCKED(object);
1380
1381	req_class = req & VM_ALLOC_CLASS_MASK;
1382
1383	/*
1384	 * The page daemon is allowed to dig deeper into the free page list.
1385	 */
1386	if (curproc == pageproc && req_class != VM_ALLOC_INTERRUPT)
1387		req_class = VM_ALLOC_SYSTEM;
1388
1389	mtx_lock(&vm_page_queue_free_mtx);
1390	if (cnt.v_free_count + cnt.v_cache_count > cnt.v_free_reserved ||
1391	    (req_class == VM_ALLOC_SYSTEM &&
1392	    cnt.v_free_count + cnt.v_cache_count > cnt.v_interrupt_free_min) ||
1393	    (req_class == VM_ALLOC_INTERRUPT &&
1394	    cnt.v_free_count + cnt.v_cache_count > 0)) {
1395		/*
1396		 * Allocate from the free queue if the number of free pages
1397		 * exceeds the minimum for the request class.
1398		 */
1399		if (object != NULL &&
1400		    (m = vm_page_cache_lookup(object, pindex)) != NULL) {
1401			if ((req & VM_ALLOC_IFNOTCACHED) != 0) {
1402				mtx_unlock(&vm_page_queue_free_mtx);
1403				return (NULL);
1404			}
1405			if (vm_phys_unfree_page(m))
1406				vm_phys_set_pool(VM_FREEPOOL_DEFAULT, m, 0);
1407#if VM_NRESERVLEVEL > 0
1408			else if (!vm_reserv_reactivate_page(m))
1409#else
1410			else
1411#endif
1412				panic("vm_page_alloc: cache page %p is missing"
1413				    " from the free queue", m);
1414		} else if ((req & VM_ALLOC_IFCACHED) != 0) {
1415			mtx_unlock(&vm_page_queue_free_mtx);
1416			return (NULL);
1417#if VM_NRESERVLEVEL > 0
1418		} else if (object == NULL || (object->flags & (OBJ_COLORED |
1419		    OBJ_FICTITIOUS)) != OBJ_COLORED ||
1420		    (m = vm_reserv_alloc_page(object, pindex)) == NULL) {
1421#else
1422		} else {
1423#endif
1424			m = vm_phys_alloc_pages(object != NULL ?
1425			    VM_FREEPOOL_DEFAULT : VM_FREEPOOL_DIRECT, 0);
1426#if VM_NRESERVLEVEL > 0
1427			if (m == NULL && vm_reserv_reclaim_inactive()) {
1428				m = vm_phys_alloc_pages(object != NULL ?
1429				    VM_FREEPOOL_DEFAULT : VM_FREEPOOL_DIRECT,
1430				    0);
1431			}
1432#endif
1433		}
1434	} else {
1435		/*
1436		 * Not allocatable, give up.
1437		 */
1438		mtx_unlock(&vm_page_queue_free_mtx);
1439		atomic_add_int(&vm_pageout_deficit,
1440		    max((u_int)req >> VM_ALLOC_COUNT_SHIFT, 1));
1441		pagedaemon_wakeup();
1442		return (NULL);
1443	}
1444
1445	/*
1446	 *  At this point we had better have found a good page.
1447	 */
1448	KASSERT(m != NULL, ("vm_page_alloc: missing page"));
1449	KASSERT(m->queue == PQ_NONE,
1450	    ("vm_page_alloc: page %p has unexpected queue %d", m, m->queue));
1451	KASSERT(m->wire_count == 0, ("vm_page_alloc: page %p is wired", m));
1452	KASSERT(m->hold_count == 0, ("vm_page_alloc: page %p is held", m));
1453	KASSERT(m->busy == 0, ("vm_page_alloc: page %p is busy", m));
1454	KASSERT(m->dirty == 0, ("vm_page_alloc: page %p is dirty", m));
1455	KASSERT(pmap_page_get_memattr(m) == VM_MEMATTR_DEFAULT,
1456	    ("vm_page_alloc: page %p has unexpected memattr %d", m,
1457	    pmap_page_get_memattr(m)));
1458	if ((m->flags & PG_CACHED) != 0) {
1459		KASSERT((m->flags & PG_ZERO) == 0,
1460		    ("vm_page_alloc: cached page %p is PG_ZERO", m));
1461		KASSERT(m->valid != 0,
1462		    ("vm_page_alloc: cached page %p is invalid", m));
1463		if (m->object == object && m->pindex == pindex)
1464	  		cnt.v_reactivated++;
1465		else
1466			m->valid = 0;
1467		m_object = m->object;
1468		vm_page_cache_remove(m);
1469		if (m_object->type == OBJT_VNODE &&
1470		    vm_object_cache_is_empty(m_object))
1471			vp = m_object->handle;
1472	} else {
1473		KASSERT(VM_PAGE_IS_FREE(m),
1474		    ("vm_page_alloc: page %p is not free", m));
1475		KASSERT(m->valid == 0,
1476		    ("vm_page_alloc: free page %p is valid", m));
1477		cnt.v_free_count--;
1478	}
1479
1480	/*
1481	 * Only the PG_ZERO flag is inherited.  The PG_CACHED or PG_FREE flag
1482	 * must be cleared before the free page queues lock is released.
1483	 */
1484	flags = 0;
1485	if (m->flags & PG_ZERO) {
1486		vm_page_zero_count--;
1487		if (req & VM_ALLOC_ZERO)
1488			flags = PG_ZERO;
1489	}
1490	if (req & VM_ALLOC_NODUMP)
1491		flags |= PG_NODUMP;
1492	m->flags = flags;
1493	mtx_unlock(&vm_page_queue_free_mtx);
1494	m->aflags = 0;
1495	m->oflags = object == NULL || (object->flags & OBJ_UNMANAGED) != 0 ?
1496	    VPO_UNMANAGED : 0;
1497	if ((req & (VM_ALLOC_NOBUSY | VM_ALLOC_NOOBJ)) == 0)
1498		m->oflags |= VPO_BUSY;
1499	if (req & VM_ALLOC_WIRED) {
1500		/*
1501		 * The page lock is not required for wiring a page until that
1502		 * page is inserted into the object.
1503		 */
1504		atomic_add_int(&cnt.v_wire_count, 1);
1505		m->wire_count = 1;
1506	}
1507	m->act_count = 0;
1508
1509	if (object != NULL) {
1510		/* Ignore device objects; the pager sets "memattr" for them. */
1511		if (object->memattr != VM_MEMATTR_DEFAULT &&
1512		    (object->flags & OBJ_FICTITIOUS) == 0)
1513			pmap_page_set_memattr(m, object->memattr);
1514		vm_page_insert(m, object, pindex);
1515	} else
1516		m->pindex = pindex;
1517
1518	/*
1519	 * The following call to vdrop() must come after the above call
1520	 * to vm_page_insert() in case both affect the same object and
1521	 * vnode.  Otherwise, the affected vnode's hold count could
1522	 * temporarily become zero.
1523	 */
1524	if (vp != NULL)
1525		vdrop(vp);
1526
1527	/*
1528	 * Don't wake up the pageout daemon too often - only wake it up
1529	 * when we would be nearly out of memory.
1530	 */
1531	if (vm_paging_needed())
1532		pagedaemon_wakeup();
1533
1534	return (m);
1535}
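/*
 * Example (illustrative sketch only): a common allocation pattern that
 * retries with VM_WAIT when no page is available.  VM_ALLOC_ZERO only
 * expresses a preference, so the page is zeroed explicitly if needed.  The
 * page is returned with VPO_BUSY set; the caller is expected to
 * vm_page_wakeup() it once initialization is complete.  The function name
 * is hypothetical.
 */
#if 0
static vm_page_t
vm_page_example_alloc(vm_object_t object, vm_pindex_t pindex)
{
	vm_page_t m;

	VM_OBJECT_WLOCK(object);
	while ((m = vm_page_alloc(object, pindex,
	    VM_ALLOC_NORMAL | VM_ALLOC_ZERO)) == NULL) {
		VM_OBJECT_WUNLOCK(object);
		VM_WAIT;		/* sleep until pages are freed */
		VM_OBJECT_WLOCK(object);
	}
	if ((m->flags & PG_ZERO) == 0)
		pmap_zero_page(m);
	VM_OBJECT_WUNLOCK(object);
	return (m);
}
#endif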
1536
1537/*
1538 *	vm_page_alloc_contig:
1539 *
1540 *	Allocate a contiguous set of physical pages of the given size "npages"
1541 *	from the free lists.  All of the physical pages must be at or above
1542 *	the given physical address "low" and below the given physical address
1543 *	"high".  The given value "alignment" determines the alignment of the
1544 *	first physical page in the set.  If the given value "boundary" is
1545 *	non-zero, then the set of physical pages cannot cross any physical
1546 *	address boundary that is a multiple of that value.  Both "alignment"
1547 *	and "boundary" must be a power of two.
1548 *
1549 *	If the specified memory attribute, "memattr", is VM_MEMATTR_DEFAULT,
1550 *	then the memory attribute setting for the physical pages is configured
1551 *	to the object's memory attribute setting.  Otherwise, the memory
1552 *	attribute setting for the physical pages is configured to "memattr",
1553 *	overriding the object's memory attribute setting.  However, if the
1554 *	object's memory attribute setting is not VM_MEMATTR_DEFAULT, then the
1555 *	memory attribute setting for the physical pages cannot be configured
1556 *	to VM_MEMATTR_DEFAULT.
1557 *
1558 *	The caller must always specify an allocation class.
1559 *
1560 *	allocation classes:
1561 *	VM_ALLOC_NORMAL		normal process request
1562 *	VM_ALLOC_SYSTEM		system *really* needs a page
1563 *	VM_ALLOC_INTERRUPT	interrupt time request
1564 *
1565 *	optional allocation flags:
1566 *	VM_ALLOC_NOBUSY		do not set the flag VPO_BUSY on the page
1567 *	VM_ALLOC_NOOBJ		page is not associated with an object and
1568 *				should not have the flag VPO_BUSY set
1569 *	VM_ALLOC_WIRED		wire the allocated page
1570 *	VM_ALLOC_ZERO		prefer a zeroed page
1571 *
1572 *	This routine may not sleep.
1573 */
1574vm_page_t
1575vm_page_alloc_contig(vm_object_t object, vm_pindex_t pindex, int req,
1576    u_long npages, vm_paddr_t low, vm_paddr_t high, u_long alignment,
1577    vm_paddr_t boundary, vm_memattr_t memattr)
1578{
1579	struct vnode *drop;
1580	vm_page_t deferred_vdrop_list, m, m_ret;
1581	u_int flags, oflags;
1582	int req_class;
1583
1584	KASSERT((object != NULL) == ((req & VM_ALLOC_NOOBJ) == 0),
1585	    ("vm_page_alloc_contig: inconsistent object/req"));
1586	if (object != NULL) {
1587		VM_OBJECT_ASSERT_WLOCKED(object);
1588		KASSERT(object->type == OBJT_PHYS,
1589		    ("vm_page_alloc_contig: object %p isn't OBJT_PHYS",
1590		    object));
1591	}
1592	KASSERT(npages > 0, ("vm_page_alloc_contig: npages is zero"));
1593	req_class = req & VM_ALLOC_CLASS_MASK;
1594
1595	/*
1596	 * The page daemon is allowed to dig deeper into the free page list.
1597	 */
1598	if (curproc == pageproc && req_class != VM_ALLOC_INTERRUPT)
1599		req_class = VM_ALLOC_SYSTEM;
1600
1601	deferred_vdrop_list = NULL;
1602	mtx_lock(&vm_page_queue_free_mtx);
1603	if (cnt.v_free_count + cnt.v_cache_count >= npages +
1604	    cnt.v_free_reserved || (req_class == VM_ALLOC_SYSTEM &&
1605	    cnt.v_free_count + cnt.v_cache_count >= npages +
1606	    cnt.v_interrupt_free_min) || (req_class == VM_ALLOC_INTERRUPT &&
1607	    cnt.v_free_count + cnt.v_cache_count >= npages)) {
1608#if VM_NRESERVLEVEL > 0
1609retry:
1610		if (object == NULL || (object->flags & OBJ_COLORED) == 0 ||
1611		    (m_ret = vm_reserv_alloc_contig(object, pindex, npages,
1612		    low, high, alignment, boundary)) == NULL)
1613#endif
1614			m_ret = vm_phys_alloc_contig(npages, low, high,
1615			    alignment, boundary);
1616	} else {
1617		mtx_unlock(&vm_page_queue_free_mtx);
1618		atomic_add_int(&vm_pageout_deficit, npages);
1619		pagedaemon_wakeup();
1620		return (NULL);
1621	}
1622	if (m_ret != NULL)
1623		for (m = m_ret; m < &m_ret[npages]; m++) {
1624			drop = vm_page_alloc_init(m);
1625			if (drop != NULL) {
1626				/*
1627				 * Enqueue the vnode for deferred vdrop().
1628				 *
1629				 * Once the pages are removed from the free
1630				 * page list, "pageq" can be safely abused to
1631				 * construct a short-lived list of vnodes.
1632				 */
1633				m->pageq.tqe_prev = (void *)drop;
1634				m->pageq.tqe_next = deferred_vdrop_list;
1635				deferred_vdrop_list = m;
1636			}
1637		}
1638	else {
1639#if VM_NRESERVLEVEL > 0
1640		if (vm_reserv_reclaim_contig(npages, low, high, alignment,
1641		    boundary))
1642			goto retry;
1643#endif
1644	}
1645	mtx_unlock(&vm_page_queue_free_mtx);
1646	if (m_ret == NULL)
1647		return (NULL);
1648
1649	/*
1650	 * Initialize the pages.  Only the PG_ZERO flag is inherited.
1651	 */
1652	flags = 0;
1653	if ((req & VM_ALLOC_ZERO) != 0)
1654		flags = PG_ZERO;
1655	if ((req & VM_ALLOC_NODUMP) != 0)
1656		flags |= PG_NODUMP;
1657	if ((req & VM_ALLOC_WIRED) != 0)
1658		atomic_add_int(&cnt.v_wire_count, npages);
1659	oflags = VPO_UNMANAGED;
1660	if (object != NULL) {
1661		if ((req & VM_ALLOC_NOBUSY) == 0)
1662			oflags |= VPO_BUSY;
1663		if (object->memattr != VM_MEMATTR_DEFAULT &&
1664		    memattr == VM_MEMATTR_DEFAULT)
1665			memattr = object->memattr;
1666	}
1667	for (m = m_ret; m < &m_ret[npages]; m++) {
1668		m->aflags = 0;
1669		m->flags = (m->flags | PG_NODUMP) & flags;
1670		if ((req & VM_ALLOC_WIRED) != 0)
1671			m->wire_count = 1;
1672		/* Unmanaged pages don't use "act_count". */
1673		m->oflags = oflags;
1674		if (memattr != VM_MEMATTR_DEFAULT)
1675			pmap_page_set_memattr(m, memattr);
1676		if (object != NULL)
1677			vm_page_insert(m, object, pindex);
1678		else
1679			m->pindex = pindex;
1680		pindex++;
1681	}
1682	while (deferred_vdrop_list != NULL) {
1683		vdrop((struct vnode *)deferred_vdrop_list->pageq.tqe_prev);
1684		deferred_vdrop_list = deferred_vdrop_list->pageq.tqe_next;
1685	}
1686	if (vm_paging_needed())
1687		pagedaemon_wakeup();
1688	return (m_ret);
1689}
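/*
 * Example (illustrative sketch only): allocating 16 wired, physically
 * contiguous pages below 4GB with 2MB alignment and no object.  All of the
 * bounds and counts here are arbitrary illustrations, and the function name
 * is hypothetical.
 */
#if 0
static vm_page_t
vm_page_example_alloc_contig(void)
{

	return (vm_page_alloc_contig(NULL, 0,
	    VM_ALLOC_NORMAL | VM_ALLOC_NOOBJ | VM_ALLOC_WIRED | VM_ALLOC_ZERO,
	    16, 0, 0xffffffffUL, 2 * 1024 * 1024, 0, VM_MEMATTR_DEFAULT));
}
#endif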
1690
1691/*
1692 * Initialize a page that has been freshly dequeued from a freelist.
1693 * The caller has to drop the vnode returned, if it is not NULL.
1694 *
1695 * This function may only be used to initialize unmanaged pages.
1696 *
1697 * To be called with vm_page_queue_free_mtx held.
1698 */
1699static struct vnode *
1700vm_page_alloc_init(vm_page_t m)
1701{
1702	struct vnode *drop;
1703	vm_object_t m_object;
1704
1705	KASSERT(m->queue == PQ_NONE,
1706	    ("vm_page_alloc_init: page %p has unexpected queue %d",
1707	    m, m->queue));
1708	KASSERT(m->wire_count == 0,
1709	    ("vm_page_alloc_init: page %p is wired", m));
1710	KASSERT(m->hold_count == 0,
1711	    ("vm_page_alloc_init: page %p is held", m));
1712	KASSERT(m->busy == 0,
1713	    ("vm_page_alloc_init: page %p is busy", m));
1714	KASSERT(m->dirty == 0,
1715	    ("vm_page_alloc_init: page %p is dirty", m));
1716	KASSERT(pmap_page_get_memattr(m) == VM_MEMATTR_DEFAULT,
1717	    ("vm_page_alloc_init: page %p has unexpected memattr %d",
1718	    m, pmap_page_get_memattr(m)));
1719	mtx_assert(&vm_page_queue_free_mtx, MA_OWNED);
1720	drop = NULL;
1721	if ((m->flags & PG_CACHED) != 0) {
1722		KASSERT((m->flags & PG_ZERO) == 0,
1723		    ("vm_page_alloc_init: cached page %p is PG_ZERO", m));
1724		m->valid = 0;
1725		m_object = m->object;
1726		vm_page_cache_remove(m);
1727		if (m_object->type == OBJT_VNODE &&
1728		    vm_object_cache_is_empty(m_object))
1729			drop = m_object->handle;
1730	} else {
1731		KASSERT(VM_PAGE_IS_FREE(m),
1732		    ("vm_page_alloc_init: page %p is not free", m));
1733		KASSERT(m->valid == 0,
1734		    ("vm_page_alloc_init: free page %p is valid", m));
1735		cnt.v_free_count--;
1736		if ((m->flags & PG_ZERO) != 0)
1737			vm_page_zero_count--;
1738	}
1739	/* Don't clear the PG_ZERO flag; we'll need it later. */
1740	m->flags &= PG_ZERO;
1741	return (drop);
1742}
1743
1744/*
1745 * 	vm_page_alloc_freelist:
1746 *
1747 *	Allocate a physical page from the specified free page list.
1748 *
1749 *	The caller must always specify an allocation class.
1750 *
1751 *	allocation classes:
1752 *	VM_ALLOC_NORMAL		normal process request
1753 *	VM_ALLOC_SYSTEM		system *really* needs a page
1754 *	VM_ALLOC_INTERRUPT	interrupt time request
1755 *
1756 *	optional allocation flags:
1757 *	VM_ALLOC_COUNT(number)	the number of additional pages that the caller
1758 *				intends to allocate
1759 *	VM_ALLOC_WIRED		wire the allocated page
1760 *	VM_ALLOC_ZERO		prefer a zeroed page
1761 *
1762 *	This routine may not sleep.
1763 */
1764vm_page_t
1765vm_page_alloc_freelist(int flind, int req)
1766{
1767	struct vnode *drop;
1768	vm_page_t m;
1769	u_int flags;
1770	int req_class;
1771
1772	req_class = req & VM_ALLOC_CLASS_MASK;
1773
1774	/*
1775	 * The page daemon is allowed to dig deeper into the free page list.
1776	 */
1777	if (curproc == pageproc && req_class != VM_ALLOC_INTERRUPT)
1778		req_class = VM_ALLOC_SYSTEM;
1779
1780	/*
1781	 * Do not allocate reserved pages unless the req has asked for it.
1782	 */
1783	mtx_lock(&vm_page_queue_free_mtx);
1784	if (cnt.v_free_count + cnt.v_cache_count > cnt.v_free_reserved ||
1785	    (req_class == VM_ALLOC_SYSTEM &&
1786	    cnt.v_free_count + cnt.v_cache_count > cnt.v_interrupt_free_min) ||
1787	    (req_class == VM_ALLOC_INTERRUPT &&
1788	    cnt.v_free_count + cnt.v_cache_count > 0))
1789		m = vm_phys_alloc_freelist_pages(flind, VM_FREEPOOL_DIRECT, 0);
1790	else {
1791		mtx_unlock(&vm_page_queue_free_mtx);
1792		atomic_add_int(&vm_pageout_deficit,
1793		    max((u_int)req >> VM_ALLOC_COUNT_SHIFT, 1));
1794		pagedaemon_wakeup();
1795		return (NULL);
1796	}
1797	if (m == NULL) {
1798		mtx_unlock(&vm_page_queue_free_mtx);
1799		return (NULL);
1800	}
1801	drop = vm_page_alloc_init(m);
1802	mtx_unlock(&vm_page_queue_free_mtx);
1803
1804	/*
1805	 * Initialize the page.  Only the PG_ZERO flag is inherited.
1806	 */
1807	m->aflags = 0;
1808	flags = 0;
1809	if ((req & VM_ALLOC_ZERO) != 0)
1810		flags = PG_ZERO;
1811	m->flags &= flags;
1812	if ((req & VM_ALLOC_WIRED) != 0) {
1813		/*
1814		 * The page lock is not required for wiring a page that does
1815		 * not belong to an object.
1816		 */
1817		atomic_add_int(&cnt.v_wire_count, 1);
1818		m->wire_count = 1;
1819	}
1820	/* Unmanaged pages don't use "act_count". */
1821	m->oflags = VPO_UNMANAGED;
1822	if (drop != NULL)
1823		vdrop(drop);
1824	if (vm_paging_needed())
1825		pagedaemon_wakeup();
1826	return (m);
1827}
1828
1829/*
1830 *	vm_wait:	(also see VM_WAIT macro)
1831 *
1832 *	Sleep until free pages are available for allocation.
1833 *	- Called in various places before memory allocations.
1834 */
1835void
1836vm_wait(void)
1837{
1838
1839	mtx_lock(&vm_page_queue_free_mtx);
1840	if (curproc == pageproc) {
1841		vm_pageout_pages_needed = 1;
1842		msleep(&vm_pageout_pages_needed, &vm_page_queue_free_mtx,
1843		    PDROP | PSWP, "VMWait", 0);
1844	} else {
1845		if (!vm_pages_needed) {
1846			vm_pages_needed = 1;
1847			wakeup(&vm_pages_needed);
1848		}
1849		msleep(&cnt.v_free_count, &vm_page_queue_free_mtx, PDROP | PVM,
1850		    "vmwait", 0);
1851	}
1852}
1853
1854/*
1855 *	vm_waitpfault:	(also see VM_WAITPFAULT macro)
1856 *
1857 *	Sleep until free pages are available for allocation.
1858 *	- Called only in vm_fault so that processes page faulting
1859 *	  can be easily tracked.
1860 *	- Sleeps at a lower priority than vm_wait() so that vm_wait()ing
1861 *	  processes will be able to grab memory first.  Do not change
1862 *	  this balance without careful testing first.
1863 */
1864void
1865vm_waitpfault(void)
1866{
1867
1868	mtx_lock(&vm_page_queue_free_mtx);
1869	if (!vm_pages_needed) {
1870		vm_pages_needed = 1;
1871		wakeup(&vm_pages_needed);
1872	}
1873	msleep(&cnt.v_free_count, &vm_page_queue_free_mtx, PDROP | PUSER,
1874	    "pfault", 0);
1875}
1876
1877/*
1878 *	vm_page_dequeue:
1879 *
1880 *	Remove the given page from its current page queue.
1881 *
1882 *	The page must be locked.
1883 */
1884void
1885vm_page_dequeue(vm_page_t m)
1886{
1887	struct vm_pagequeue *pq;
1888
1889	vm_page_lock_assert(m, MA_OWNED);
1890	KASSERT(m->queue != PQ_NONE,
1891	    ("vm_page_dequeue: page %p is not queued", m));
1892	pq = &vm_pagequeues[m->queue];
1893	vm_pagequeue_lock(pq);
1894	m->queue = PQ_NONE;
1895	TAILQ_REMOVE(&pq->pq_pl, m, pageq);
1896	(*pq->pq_cnt)--;
1897	vm_pagequeue_unlock(pq);
1898}
1899
1900/*
1901 *	vm_page_dequeue_locked:
1902 *
1903 *	Remove the given page from its current page queue.
1904 *
1905 *	The page and page queue must be locked.
1906 */
1907void
1908vm_page_dequeue_locked(vm_page_t m)
1909{
1910	struct vm_pagequeue *pq;
1911
1912	vm_page_lock_assert(m, MA_OWNED);
1913	pq = &vm_pagequeues[m->queue];
1914	vm_pagequeue_assert_locked(pq);
1915	m->queue = PQ_NONE;
1916	TAILQ_REMOVE(&pq->pq_pl, m, pageq);
1917	(*pq->pq_cnt)--;
1918}
1919
1920/*
1921 *	vm_page_enqueue:
1922 *
1923 *	Add the given page to the specified page queue.
1924 *
1925 *	The page must be locked.
1926 */
1927static void
1928vm_page_enqueue(int queue, vm_page_t m)
1929{
1930	struct vm_pagequeue *pq;
1931
1932	vm_page_lock_assert(m, MA_OWNED);
1933	pq = &vm_pagequeues[queue];
1934	vm_pagequeue_lock(pq);
1935	m->queue = queue;
1936	TAILQ_INSERT_TAIL(&pq->pq_pl, m, pageq);
1937	++*pq->pq_cnt;
1938	vm_pagequeue_unlock(pq);
1939}
1940
1941/*
1942 *	vm_page_requeue:
1943 *
1944 *	Move the given page to the tail of its current page queue.
1945 *
1946 *	The page must be locked.
1947 */
1948void
1949vm_page_requeue(vm_page_t m)
1950{
1951	struct vm_pagequeue *pq;
1952
1953	vm_page_lock_assert(m, MA_OWNED);
1954	KASSERT(m->queue != PQ_NONE,
1955	    ("vm_page_requeue: page %p is not queued", m));
1956	pq = &vm_pagequeues[m->queue];
1957	vm_pagequeue_lock(pq);
1958	TAILQ_REMOVE(&pq->pq_pl, m, pageq);
1959	TAILQ_INSERT_TAIL(&pq->pq_pl, m, pageq);
1960	vm_pagequeue_unlock(pq);
1961}
1962
1963/*
1964 *	vm_page_requeue_locked:
1965 *
1966 *	Move the given page to the tail of its current page queue.
1967 *
1968 *	The page queue must be locked.
1969 */
1970void
1971vm_page_requeue_locked(vm_page_t m)
1972{
1973	struct vm_pagequeue *pq;
1974
1975	KASSERT(m->queue != PQ_NONE,
1976	    ("vm_page_requeue_locked: page %p is not queued", m));
1977	pq = &vm_pagequeues[m->queue];
1978	vm_pagequeue_assert_locked(pq);
1979	TAILQ_REMOVE(&pq->pq_pl, m, pageq);
1980	TAILQ_INSERT_TAIL(&pq->pq_pl, m, pageq);
1981}
1982
1983/*
1984 *	vm_page_activate:
1985 *
1986 *	Put the specified page on the active list (if appropriate).
1987 *	Ensure that act_count is at least ACT_INIT but do not otherwise
1988 *	mess with it.
1989 *
1990 *	The page must be locked.
1991 */
1992void
1993vm_page_activate(vm_page_t m)
1994{
1995	int queue;
1996
1997	vm_page_lock_assert(m, MA_OWNED);
1998	VM_OBJECT_ASSERT_WLOCKED(m->object);
1999	if ((queue = m->queue) != PQ_ACTIVE) {
2000		if (m->wire_count == 0 && (m->oflags & VPO_UNMANAGED) == 0) {
2001			if (m->act_count < ACT_INIT)
2002				m->act_count = ACT_INIT;
2003			if (queue != PQ_NONE)
2004				vm_page_dequeue(m);
2005			vm_page_enqueue(PQ_ACTIVE, m);
2006		} else
2007			KASSERT(queue == PQ_NONE,
2008			    ("vm_page_activate: wired page %p is queued", m));
2009	} else {
2010		if (m->act_count < ACT_INIT)
2011			m->act_count = ACT_INIT;
2012	}
2013}
2014
2015/*
2016 *	vm_page_free_wakeup:
2017 *
2018 *	Helper routine for vm_page_free_toq() and vm_page_cache().  This
2019 *	routine is called when a page has been added to the cache or free
2020 *	queues.
2021 *
2022 *	The page queues must be locked.
2023 */
2024static inline void
2025vm_page_free_wakeup(void)
2026{
2027
2028	mtx_assert(&vm_page_queue_free_mtx, MA_OWNED);
2029	/*
2030	 * If the pageout daemon needs pages, then tell it that there are
2031	 * some free.
2032	 */
2033	if (vm_pageout_pages_needed &&
2034	    cnt.v_cache_count + cnt.v_free_count >= cnt.v_pageout_free_min) {
2035		wakeup(&vm_pageout_pages_needed);
2036		vm_pageout_pages_needed = 0;
2037	}
2038	/*
2039	 * Wake up processes that are waiting on memory if we hit a
2040	 * high water mark, and wake up the scheduler process if we have
2041	 * lots of memory; it will swap in processes.
2042	 */
2043	if (vm_pages_needed && !vm_page_count_min()) {
2044		vm_pages_needed = 0;
2045		wakeup(&cnt.v_free_count);
2046	}
2047}
2048
2049/*
2050 *	vm_page_free_toq:
2051 *
2052 *	Returns the given page to the free list,
2053 *	disassociating it from any VM object.
2054 *
2055 *	The object must be locked.  The page must be locked if it is managed.
2056 */
2057void
2058vm_page_free_toq(vm_page_t m)
2059{
2060
2061	if ((m->oflags & VPO_UNMANAGED) == 0) {
2062		vm_page_lock_assert(m, MA_OWNED);
2063		KASSERT(!pmap_page_is_mapped(m),
2064		    ("vm_page_free_toq: freeing mapped page %p", m));
2065	} else
2066		KASSERT(m->queue == PQ_NONE,
2067		    ("vm_page_free_toq: unmanaged page %p is queued", m));
2068	PCPU_INC(cnt.v_tfree);
2069
2070	if (VM_PAGE_IS_FREE(m))
2071		panic("vm_page_free: freeing free page %p", m);
2072	else if (m->busy != 0)
2073		panic("vm_page_free: freeing busy page %p", m);
2074
2075	/*
2076	 * Unqueue, then remove page.  Note that we cannot destroy
2077	 * the page here because we do not want to call the pager's
2078	 * callback routine until after we've put the page on the
2079	 * appropriate free queue.
2080	 */
2081	vm_page_remque(m);
2082	vm_page_remove(m);
2083
2084	/*
2085	 * If the page is fictitious, remove the object association and
2086	 * return; otherwise, delay the object association removal.
2087	 */
2088	if ((m->flags & PG_FICTITIOUS) != 0) {
2089		return;
2090	}
2091
2092	m->valid = 0;
2093	vm_page_undirty(m);
2094
2095	if (m->wire_count != 0)
2096		panic("vm_page_free: freeing wired page %p", m);
2097	if (m->hold_count != 0) {
2098		m->flags &= ~PG_ZERO;
2099		KASSERT((m->flags & PG_UNHOLDFREE) == 0,
2100		    ("vm_page_free: freeing PG_UNHOLDFREE page %p", m));
2101		m->flags |= PG_UNHOLDFREE;
2102	} else {
2103		/*
2104		 * Restore the default memory attribute to the page.
2105		 */
2106		if (pmap_page_get_memattr(m) != VM_MEMATTR_DEFAULT)
2107			pmap_page_set_memattr(m, VM_MEMATTR_DEFAULT);
2108
2109		/*
2110		 * Insert the page into the physical memory allocator's
2111		 * cache/free page queues.
2112		 */
2113		mtx_lock(&vm_page_queue_free_mtx);
2114		m->flags |= PG_FREE;
2115		cnt.v_free_count++;
2116#if VM_NRESERVLEVEL > 0
2117		if (!vm_reserv_free_page(m))
2118#else
2119		if (TRUE)
2120#endif
2121			vm_phys_free_pages(m, 0);
2122		if ((m->flags & PG_ZERO) != 0)
2123			++vm_page_zero_count;
2124		else
2125			vm_page_zero_idle_wakeup();
2126		vm_page_free_wakeup();
2127		mtx_unlock(&vm_page_queue_free_mtx);
2128	}
2129}
2130
2131/*
2132 *	vm_page_wire:
2133 *
2134 *	Mark this page as wired down by yet
2135 *	another map, removing it from paging queues
2136 *	as necessary.
2137 *
2138 *	If the page is fictitious, then its wire count must remain one.
2139 *
2140 *	The page must be locked.
2141 */
2142void
2143vm_page_wire(vm_page_t m)
2144{
2145
2146	/*
2147	 * Only bump the wire statistics if the page is not already wired,
2148	 * and only unqueue the page if it is on some queue (if it is unmanaged
2149	 * it is already off the queues).
2150	 */
2151	vm_page_lock_assert(m, MA_OWNED);
2152	if ((m->flags & PG_FICTITIOUS) != 0) {
2153		KASSERT(m->wire_count == 1,
2154		    ("vm_page_wire: fictitious page %p's wire count isn't one",
2155		    m));
2156		return;
2157	}
2158	if (m->wire_count == 0) {
2159		KASSERT((m->oflags & VPO_UNMANAGED) == 0 ||
2160		    m->queue == PQ_NONE,
2161		    ("vm_page_wire: unmanaged page %p is queued", m));
2162		vm_page_remque(m);
2163		atomic_add_int(&cnt.v_wire_count, 1);
2164	}
2165	m->wire_count++;
2166	KASSERT(m->wire_count != 0, ("vm_page_wire: wire_count overflow m=%p", m));
2167}
2168
2169/*
2170 * vm_page_unwire:
2171 *
2172 * Release one wiring of the specified page, potentially enabling it to be
2173 * paged again.  If paging is enabled, then the value of the parameter
2174 * "activate" determines to which queue the page is added.  If "activate" is
2175 * non-zero, then the page is added to the active queue.  Otherwise, it is
2176 * added to the inactive queue.
2177 *
2178 * However, unless the page belongs to an object, it is not enqueued because
2179 * it cannot be paged out.
2180 *
2181 * If a page is fictitious, then its wire count must always be one.
2182 *
2183 * A managed page must be locked.
2184 */
2185void
2186vm_page_unwire(vm_page_t m, int activate)
2187{
2188
2189	if ((m->oflags & VPO_UNMANAGED) == 0)
2190		vm_page_lock_assert(m, MA_OWNED);
2191	if ((m->flags & PG_FICTITIOUS) != 0) {
2192		KASSERT(m->wire_count == 1,
2193	    ("vm_page_unwire: fictitious page %p's wire count isn't one", m));
2194		return;
2195	}
2196	if (m->wire_count > 0) {
2197		m->wire_count--;
2198		if (m->wire_count == 0) {
2199			atomic_subtract_int(&cnt.v_wire_count, 1);
2200			if ((m->oflags & VPO_UNMANAGED) != 0 ||
2201			    m->object == NULL)
2202				return;
2203			if (!activate)
2204				m->flags &= ~PG_WINATCFLS;
2205			vm_page_enqueue(activate ? PQ_ACTIVE : PQ_INACTIVE, m);
2206		}
2207	} else
2208		panic("vm_page_unwire: page %p's wire count is zero", m);
2209}
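
/*
 * Editor's illustrative sketch, not part of the original file: a typical
 * wire/unwire pairing around direct access to a managed page.  Passing 0 to
 * vm_page_unwire() places a page that belongs to an object on the inactive
 * queue once its last wiring is released.  The guard macro VM_PAGE_EXAMPLES
 * is hypothetical.
 */
#ifdef VM_PAGE_EXAMPLES
static void
example_wire_for_io(vm_page_t m)
{

	vm_page_lock(m);
	vm_page_wire(m);
	vm_page_unlock(m);

	/* ... the page cannot be reclaimed while it remains wired ... */

	vm_page_lock(m);
	vm_page_unwire(m, 0);
	vm_page_unlock(m);
}
#endif /* VM_PAGE_EXAMPLES */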
2210
2211/*
2212 * Move the specified page to the inactive queue.
2213 *
2214 * Many pages placed on the inactive queue should actually go
2215 * into the cache, but it is difficult to figure out which.  What
2216 * we do instead, if the inactive target is well met, is to put
2217 * clean pages at the head of the inactive queue instead of the tail.
2218 * This will cause them to be moved to the cache more quickly and
2219 * if not actively re-referenced, reclaimed more quickly.  If we just
2220 * stick these pages at the end of the inactive queue, heavy filesystem
2221 * meta-data accesses can cause an unnecessary paging load on memory bound
2222 * processes.  This optimization causes one-time-use metadata to be
2223 * reused more quickly.
2224 *
2225 * Normally athead is 0 resulting in LRU operation.  athead is set
2226 * to 1 if we want this page to be 'as if it were placed in the cache',
2227 * except without unmapping it from the process address space.
2228 *
2229 * The page must be locked.
2230 */
2231static inline void
2232_vm_page_deactivate(vm_page_t m, int athead)
2233{
2234	struct vm_pagequeue *pq;
2235	int queue;
2236
2237	vm_page_lock_assert(m, MA_OWNED);
2238
2239	/*
2240	 * Ignore if already inactive.
2241	 */
2242	if ((queue = m->queue) == PQ_INACTIVE)
2243		return;
2244	if (m->wire_count == 0 && (m->oflags & VPO_UNMANAGED) == 0) {
2245		if (queue != PQ_NONE)
2246			vm_page_dequeue(m);
2247		m->flags &= ~PG_WINATCFLS;
2248		pq = &vm_pagequeues[PQ_INACTIVE];
2249		vm_pagequeue_lock(pq);
2250		m->queue = PQ_INACTIVE;
2251		if (athead)
2252			TAILQ_INSERT_HEAD(&pq->pq_pl, m, pageq);
2253		else
2254			TAILQ_INSERT_TAIL(&pq->pq_pl, m, pageq);
2255		cnt.v_inactive_count++;
2256		vm_pagequeue_unlock(pq);
2257	}
2258}
2259
2260/*
2261 * Move the specified page to the inactive queue.
2262 *
2263 * The page must be locked.
2264 */
2265void
2266vm_page_deactivate(vm_page_t m)
2267{
2268
2269	_vm_page_deactivate(m, 0);
2270}
2271
2272/*
2273 * vm_page_try_to_cache:
2274 *
2275 * Returns 0 on failure, 1 on success
2276 */
2277int
2278vm_page_try_to_cache(vm_page_t m)
2279{
2280
2281	vm_page_lock_assert(m, MA_OWNED);
2282	VM_OBJECT_ASSERT_WLOCKED(m->object);
2283	if (m->dirty || m->hold_count || m->busy || m->wire_count ||
2284	    (m->oflags & (VPO_BUSY | VPO_UNMANAGED)) != 0)
2285		return (0);
2286	pmap_remove_all(m);
2287	if (m->dirty)
2288		return (0);
2289	vm_page_cache(m);
2290	return (1);
2291}
2292
2293/*
2294 * vm_page_try_to_free()
2295 *
2296 *	Attempt to free the page.  If we cannot free it, we do nothing.
2297 *	1 is returned on success, 0 on failure.
2298 */
2299int
2300vm_page_try_to_free(vm_page_t m)
2301{
2302
2303	vm_page_lock_assert(m, MA_OWNED);
2304	if (m->object != NULL)
2305		VM_OBJECT_ASSERT_WLOCKED(m->object);
2306	if (m->dirty || m->hold_count || m->busy || m->wire_count ||
2307	    (m->oflags & (VPO_BUSY | VPO_UNMANAGED)) != 0)
2308		return (0);
2309	pmap_remove_all(m);
2310	if (m->dirty)
2311		return (0);
2312	vm_page_free(m);
2313	return (1);
2314}
2315
2316/*
2317 * vm_page_cache
2318 *
2319 * Put the specified page onto the page cache queue (if appropriate).
2320 *
2321 * The object and page must be locked.
2322 */
2323void
2324vm_page_cache(vm_page_t m)
2325{
2326	vm_object_t object;
2327	vm_page_t next, prev, root;
2328
2329	vm_page_lock_assert(m, MA_OWNED);
2330	object = m->object;
2331	VM_OBJECT_ASSERT_WLOCKED(object);
2332	if ((m->oflags & (VPO_UNMANAGED | VPO_BUSY)) || m->busy ||
2333	    m->hold_count || m->wire_count)
2334		panic("vm_page_cache: attempting to cache busy page");
2335	KASSERT(!pmap_page_is_mapped(m),
2336	    ("vm_page_cache: page %p is mapped", m));
2337	KASSERT(m->dirty == 0, ("vm_page_cache: page %p is dirty", m));
2338	if (m->valid == 0 || object->type == OBJT_DEFAULT ||
2339	    (object->type == OBJT_SWAP &&
2340	    !vm_pager_has_page(object, m->pindex, NULL, NULL))) {
2341		/*
2342		 * Hypothesis: A cache-eligible page belonging to a
2343		 * default object or swap object but without a backing
2344		 * store must be zero filled.
2345		 */
2346		vm_page_free(m);
2347		return;
2348	}
2349	KASSERT((m->flags & PG_CACHED) == 0,
2350	    ("vm_page_cache: page %p is already cached", m));
2351	PCPU_INC(cnt.v_tcached);
2352
2353	/*
2354	 * Remove the page from the paging queues.
2355	 */
2356	vm_page_remque(m);
2357
2358	/*
2359	 * Remove the page from the object's collection of resident
2360	 * pages.
2361	 */
2362	if ((next = TAILQ_NEXT(m, listq)) != NULL && next->left == m) {
2363		/*
2364		 * Since the page's successor in the list is also its parent
2365		 * in the tree, its right subtree must be empty.
2366		 */
2367		next->left = m->left;
2368		KASSERT(m->right == NULL,
2369		    ("vm_page_cache: page %p has right child", m));
2370	} else if ((prev = TAILQ_PREV(m, pglist, listq)) != NULL &&
2371	    prev->right == m) {
2372		/*
2373		 * Since the page's predecessor in the list is also its parent
2374		 * in the tree, its left subtree must be empty.
2375		 */
2376		KASSERT(m->left == NULL,
2377		    ("vm_page_cache: page %p has left child", m));
2378		prev->right = m->right;
2379	} else {
2380		if (m != object->root)
2381			vm_page_splay(m->pindex, object->root);
2382		if (m->left == NULL)
2383			root = m->right;
2384		else if (m->right == NULL)
2385			root = m->left;
2386		else {
2387			/*
2388			 * Move the page's successor to the root, because
2389			 * pages are usually removed in ascending order.
2390			 */
2391			if (m->right != next)
2392				vm_page_splay(m->pindex, m->right);
2393			next->left = m->left;
2394			root = next;
2395		}
2396		object->root = root;
2397	}
2398	TAILQ_REMOVE(&object->memq, m, listq);
2399	object->resident_page_count--;
2400
2401	/*
2402	 * Restore the default memory attribute to the page.
2403	 */
2404	if (pmap_page_get_memattr(m) != VM_MEMATTR_DEFAULT)
2405		pmap_page_set_memattr(m, VM_MEMATTR_DEFAULT);
2406
2407	/*
2408	 * Insert the page into the object's collection of cached pages
2409	 * and the physical memory allocator's cache/free page queues.
2410	 */
2411	m->flags &= ~PG_ZERO;
2412	mtx_lock(&vm_page_queue_free_mtx);
2413	m->flags |= PG_CACHED;
2414	cnt.v_cache_count++;
2415	root = object->cache;
2416	if (root == NULL) {
2417		m->left = NULL;
2418		m->right = NULL;
2419	} else {
2420		root = vm_page_splay(m->pindex, root);
2421		if (m->pindex < root->pindex) {
2422			m->left = root->left;
2423			m->right = root;
2424			root->left = NULL;
2425		} else if (__predict_false(m->pindex == root->pindex))
2426			panic("vm_page_cache: offset already cached");
2427		else {
2428			m->right = root->right;
2429			m->left = root;
2430			root->right = NULL;
2431		}
2432	}
2433	object->cache = m;
2434#if VM_NRESERVLEVEL > 0
2435	if (!vm_reserv_free_page(m)) {
2436#else
2437	if (TRUE) {
2438#endif
2439		vm_phys_set_pool(VM_FREEPOOL_CACHE, m, 0);
2440		vm_phys_free_pages(m, 0);
2441	}
2442	vm_page_free_wakeup();
2443	mtx_unlock(&vm_page_queue_free_mtx);
2444
2445	/*
2446	 * Increment the vnode's hold count if this is the object's only
2447	 * cached page.  Decrement the vnode's hold count if this was
2448	 * the object's only resident page.
2449	 */
2450	if (object->type == OBJT_VNODE) {
2451		if (root == NULL && object->resident_page_count != 0)
2452			vhold(object->handle);
2453		else if (root != NULL && object->resident_page_count == 0)
2454			vdrop(object->handle);
2455	}
2456}
2457
2458/*
2459 * vm_page_dontneed
2460 *
2461 *	Cache, deactivate, or do nothing as appropriate.  This routine
2462 *	is typically used by madvise() MADV_DONTNEED.
2463 *
2464 *	Generally speaking we want to move the page into the cache so
2465 *	it gets reused quickly.  However, this can result in a silly syndrome
2466 *	due to the page recycling too quickly.  Small objects will not be
2467 *	fully cached.  On the other hand, if we move the page to the inactive
2468 *	queue we wind up with a problem whereby very large objects
2469 *	unnecessarily blow away our inactive and cache queues.
2470 *
2471 *	The solution is to move the pages based on a fixed weighting.  We
2472 *	either leave them alone, deactivate them, or move them to the cache,
2473 *	where moving them to the cache has the highest weighting.
2474 *	By forcing some pages into other queues we eventually force the
2475 *	system to balance the queues, potentially recovering other unrelated
2476 *	space from active.  The idea is to not force this to happen too
2477 *	often.
2478 *
2479 *	The object and page must be locked.
2480 */
2481void
2482vm_page_dontneed(vm_page_t m)
2483{
2484	int dnw;
2485	int head;
2486
2487	vm_page_lock_assert(m, MA_OWNED);
2488	VM_OBJECT_ASSERT_WLOCKED(m->object);
2489	dnw = PCPU_GET(dnweight);
2490	PCPU_INC(dnweight);
2491
2492	/*
2493	 * Occasionally leave the page alone.
2494	 */
2495	if ((dnw & 0x01F0) == 0 || m->queue == PQ_INACTIVE) {
2496		if (m->act_count >= ACT_INIT)
2497			--m->act_count;
2498		return;
2499	}
2500
2501	/*
2502	 * Clear any references to the page.  Otherwise, the page daemon will
2503	 * immediately reactivate the page.
2504	 *
2505	 * Perform the pmap_clear_reference() first.  Otherwise, a concurrent
2506	 * pmap operation, such as pmap_remove(), could clear a reference in
2507	 * the pmap and set PGA_REFERENCED on the page before the
2508	 * pmap_clear_reference() had completed.  Consequently, the page would
2509	 * appear referenced based upon an old reference that occurred before
2510	 * this function ran.
2511	 */
2512	pmap_clear_reference(m);
2513	vm_page_aflag_clear(m, PGA_REFERENCED);
2514
2515	if (m->dirty == 0 && pmap_is_modified(m))
2516		vm_page_dirty(m);
2517
2518	if (m->dirty || (dnw & 0x0070) == 0) {
2519		/*
2520		 * Deactivate the page 3 times out of 32.
2521		 */
2522		head = 0;
2523	} else {
2524		/*
2525		 * Cache the page 28 times out of every 32.  Note that
2526		 * the page is deactivated instead of cached, but placed
2527		 * at the head of the queue instead of the tail.
2528		 */
2529		head = 1;
2530	}
2531	_vm_page_deactivate(m, head);
2532}
2533
2534/*
2535 * Grab a page, waiting until we are woken up due to the page
2536 * changing state.  We keep on waiting as long as the page continues
2537 * to be in the object.  If the page doesn't exist, first allocate it
2538 * and then conditionally zero it.
2539 *
2540 * The caller must always specify the VM_ALLOC_RETRY flag.  This is intended
2541 * to facilitate its eventual removal.
2542 *
2543 * This routine may sleep.
2544 *
2545 * The object must be locked on entry.  The lock will, however, be released
2546 * and reacquired if the routine sleeps.
2547 */
2548vm_page_t
2549vm_page_grab(vm_object_t object, vm_pindex_t pindex, int allocflags)
2550{
2551	vm_page_t m;
2552
2553	VM_OBJECT_ASSERT_WLOCKED(object);
2554	KASSERT((allocflags & VM_ALLOC_RETRY) != 0,
2555	    ("vm_page_grab: VM_ALLOC_RETRY is required"));
2556retrylookup:
2557	if ((m = vm_page_lookup(object, pindex)) != NULL) {
2558		if ((m->oflags & VPO_BUSY) != 0 ||
2559		    ((allocflags & VM_ALLOC_IGN_SBUSY) == 0 && m->busy != 0)) {
2560			/*
2561			 * Reference the page before unlocking and
2562			 * sleeping so that the page daemon is less
2563			 * likely to reclaim it.
2564			 */
2565			vm_page_aflag_set(m, PGA_REFERENCED);
2566			vm_page_sleep(m, "pgrbwt");
2567			goto retrylookup;
2568		} else {
2569			if ((allocflags & VM_ALLOC_WIRED) != 0) {
2570				vm_page_lock(m);
2571				vm_page_wire(m);
2572				vm_page_unlock(m);
2573			}
2574			if ((allocflags & VM_ALLOC_NOBUSY) == 0)
2575				vm_page_busy(m);
2576			return (m);
2577		}
2578	}
2579	m = vm_page_alloc(object, pindex, allocflags & ~(VM_ALLOC_RETRY |
2580	    VM_ALLOC_IGN_SBUSY));
2581	if (m == NULL) {
2582		VM_OBJECT_WUNLOCK(object);
2583		VM_WAIT;
2584		VM_OBJECT_WLOCK(object);
2585		goto retrylookup;
2586	} else if (m->valid != 0)
2587		return (m);
2588	if (allocflags & VM_ALLOC_ZERO && (m->flags & PG_ZERO) == 0)
2589		pmap_zero_page(m);
2590	return (m);
2591}
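
/*
 * Editor's illustrative sketch, not part of the original file: grabbing a
 * page so that it is resident, zero filling it if it had to be freshly
 * allocated.  The page is returned with VPO_BUSY set because VM_ALLOC_NOBUSY
 * was not passed, so the caller releases it with vm_page_wakeup() when done.
 * The guard macro VM_PAGE_EXAMPLES is hypothetical.
 */
#ifdef VM_PAGE_EXAMPLES
static vm_page_t
example_grab_zeroed(vm_object_t object, vm_pindex_t pindex)
{
	vm_page_t m;

	VM_OBJECT_WLOCK(object);
	m = vm_page_grab(object, pindex,
	    VM_ALLOC_NORMAL | VM_ALLOC_RETRY | VM_ALLOC_ZERO);
	/* ... initialize or inspect the page while it is busied ... */
	vm_page_wakeup(m);
	VM_OBJECT_WUNLOCK(object);
	return (m);
}
#endif /* VM_PAGE_EXAMPLES */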
2592
2593/*
2594 * Mapping function for valid or dirty bits in a page.
2595 *
2596 * Inputs are required to range within a page.
2597 */
2598vm_page_bits_t
2599vm_page_bits(int base, int size)
2600{
2601	int first_bit;
2602	int last_bit;
2603
2604	KASSERT(
2605	    base + size <= PAGE_SIZE,
2606	    ("vm_page_bits: illegal base/size %d/%d", base, size)
2607	);
2608
2609	if (size == 0)		/* handle degenerate case */
2610		return (0);
2611
2612	first_bit = base >> DEV_BSHIFT;
2613	last_bit = (base + size - 1) >> DEV_BSHIFT;
2614
2615	return (((vm_page_bits_t)2 << last_bit) -
2616	    ((vm_page_bits_t)1 << first_bit));
2617}
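
/*
 * Editor's worked example, not part of the original file: with DEV_BSIZE 512
 * (DEV_BSHIFT 9), vm_page_bits(512, 1024) computes first_bit = 1 and
 * last_bit = (512 + 1024 - 1) >> 9 = 2, so the result is
 * (2 << 2) - (1 << 1) = 0x6, i.e. the bits for the second and third
 * 512-byte blocks of the page.
 */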
2618
2619/*
2620 *	vm_page_set_valid_range:
2621 *
2622 *	Sets portions of a page valid.  The arguments are expected
2623 *	to be DEV_BSIZE aligned but if they aren't the bitmap is inclusive
2624 *	of any partial chunks touched by the range.  The invalid portion of
2625 *	such chunks will be zeroed.
2626 *
2627 *	(base + size) must be less than or equal to PAGE_SIZE.
2628 */
2629void
2630vm_page_set_valid_range(vm_page_t m, int base, int size)
2631{
2632	int endoff, frag;
2633
2634	VM_OBJECT_ASSERT_WLOCKED(m->object);
2635	if (size == 0)	/* handle degenerate case */
2636		return;
2637
2638	/*
2639	 * If the base is not DEV_BSIZE aligned and the valid
2640	 * bit is clear, we have to zero out a portion of the
2641	 * first block.
2642	 */
2643	if ((frag = base & ~(DEV_BSIZE - 1)) != base &&
2644	    (m->valid & (1 << (base >> DEV_BSHIFT))) == 0)
2645		pmap_zero_page_area(m, frag, base - frag);
2646
2647	/*
2648	 * If the ending offset is not DEV_BSIZE aligned and the
2649	 * valid bit is clear, we have to zero out a portion of
2650	 * the last block.
2651	 */
2652	endoff = base + size;
2653	if ((frag = endoff & ~(DEV_BSIZE - 1)) != endoff &&
2654	    (m->valid & (1 << (endoff >> DEV_BSHIFT))) == 0)
2655		pmap_zero_page_area(m, endoff,
2656		    DEV_BSIZE - (endoff & (DEV_BSIZE - 1)));
2657
2658	/*
2659	 * Assert that no previously invalid block that is now being validated
2660	 * is already dirty.
2661	 */
2662	KASSERT((~m->valid & vm_page_bits(base, size) & m->dirty) == 0,
2663	    ("vm_page_set_valid_range: page %p is dirty", m));
2664
2665	/*
2666	 * Set valid bits inclusive of any overlap.
2667	 */
2668	m->valid |= vm_page_bits(base, size);
2669}
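
/*
 * Editor's worked example, not part of the original file: with DEV_BSIZE 512,
 * vm_page_set_valid_range(m, 100, 200) touches only part of the first block.
 * If that block's valid bit is clear, the two pmap_zero_page_area() calls
 * above zero bytes [0, 100) and [300, 512), and vm_page_bits(100, 200) = 0x1
 * then marks the whole first block valid.
 */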
2670
2671/*
2672 * Clear the given bits from the specified page's dirty field.
2673 */
2674static __inline void
2675vm_page_clear_dirty_mask(vm_page_t m, vm_page_bits_t pagebits)
2676{
2677	uintptr_t addr;
2678#if PAGE_SIZE < 16384
2679	int shift;
2680#endif
2681
2682	/*
2683	 * If the object is locked and the page is neither VPO_BUSY nor
2684	 * write mapped, then the page's dirty field cannot possibly be
2685	 * set by a concurrent pmap operation.
2686	 */
2687	VM_OBJECT_ASSERT_WLOCKED(m->object);
2688	if ((m->oflags & VPO_BUSY) == 0 && !pmap_page_is_write_mapped(m))
2689		m->dirty &= ~pagebits;
2690	else {
2691		/*
2692		 * The pmap layer can call vm_page_dirty() without
2693		 * holding a distinguished lock.  The combination of
2694		 * the object's lock and an atomic operation suffice
2695		 * to guarantee consistency of the page dirty field.
2696		 *
2697		 * For the PAGE_SIZE == 32768 case, the compiler already
2698		 * properly aligns the dirty field, so no forcible
2699		 * alignment is needed.  Only require the existence of
2700		 * atomic_clear_64 when the page size is 32768.
2701		 */
2702		addr = (uintptr_t)&m->dirty;
2703#if PAGE_SIZE == 32768
2704		atomic_clear_64((uint64_t *)addr, pagebits);
2705#elif PAGE_SIZE == 16384
2706		atomic_clear_32((uint32_t *)addr, pagebits);
2707#else		/* PAGE_SIZE <= 8192 */
2708		/*
2709		 * Use a trick to perform a 32-bit atomic on the
2710		 * containing aligned word, to not depend on the existence
2711		 * of atomic_clear_{8, 16}.
2712		 */
2713		shift = addr & (sizeof(uint32_t) - 1);
2714#if BYTE_ORDER == BIG_ENDIAN
2715		shift = (sizeof(uint32_t) - sizeof(m->dirty) - shift) * NBBY;
2716#else
2717		shift *= NBBY;
2718#endif
2719		addr &= ~(sizeof(uint32_t) - 1);
2720		atomic_clear_32((uint32_t *)addr, pagebits << shift);
2721#endif		/* PAGE_SIZE */
2722	}
2723}
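
/*
 * Editor's worked example, not part of the original file: with PAGE_SIZE 4096
 * the dirty field is a single byte (vm_page_bits_t is uint8_t).  If that byte
 * sits at offset 3 within its aligned 32-bit word on a little-endian machine,
 * shift = 3 * NBBY = 24, and atomic_clear_32() on the aligned word with
 * (pagebits << 24) clears exactly the requested dirty bits without requiring
 * an atomic_clear_8().
 */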
2724
2725/*
2726 *	vm_page_set_validclean:
2727 *
2728 *	Sets portions of a page valid and clean.  The arguments are expected
2729 *	to be DEV_BSIZE aligned but if they aren't the bitmap is inclusive
2730 *	of any partial chunks touched by the range.  The invalid portion of
2731 *	such chunks will be zero'd.
2732 *
2733 *	(base + size) must be less than or equal to PAGE_SIZE.
2734 */
2735void
2736vm_page_set_validclean(vm_page_t m, int base, int size)
2737{
2738	vm_page_bits_t oldvalid, pagebits;
2739	int endoff, frag;
2740
2741	VM_OBJECT_ASSERT_WLOCKED(m->object);
2742	if (size == 0)	/* handle degenerate case */
2743		return;
2744
2745	/*
2746	 * If the base is not DEV_BSIZE aligned and the valid
2747	 * bit is clear, we have to zero out a portion of the
2748	 * first block.
2749	 */
2750	if ((frag = base & ~(DEV_BSIZE - 1)) != base &&
2751	    (m->valid & ((vm_page_bits_t)1 << (base >> DEV_BSHIFT))) == 0)
2752		pmap_zero_page_area(m, frag, base - frag);
2753
2754	/*
2755	 * If the ending offset is not DEV_BSIZE aligned and the
2756	 * valid bit is clear, we have to zero out a portion of
2757	 * the last block.
2758	 */
2759	endoff = base + size;
2760	if ((frag = endoff & ~(DEV_BSIZE - 1)) != endoff &&
2761	    (m->valid & ((vm_page_bits_t)1 << (endoff >> DEV_BSHIFT))) == 0)
2762		pmap_zero_page_area(m, endoff,
2763		    DEV_BSIZE - (endoff & (DEV_BSIZE - 1)));
2764
2765	/*
2766	 * Set valid, clear dirty bits.  If validating the entire
2767	 * page we can safely clear the pmap modify bit.  We also
2768	 * use this opportunity to clear the VPO_NOSYNC flag.  If a process
2769	 * takes a write fault on a MAP_NOSYNC memory area the flag will
2770	 * be set again.
2771	 *
2772	 * We set valid bits inclusive of any overlap, but we can only
2773	 * clear dirty bits for DEV_BSIZE chunks that are fully within
2774	 * the range.
2775	 */
2776	oldvalid = m->valid;
2777	pagebits = vm_page_bits(base, size);
2778	m->valid |= pagebits;
2779#if 0	/* NOT YET */
2780	if ((frag = base & (DEV_BSIZE - 1)) != 0) {
2781		frag = DEV_BSIZE - frag;
2782		base += frag;
2783		size -= frag;
2784		if (size < 0)
2785			size = 0;
2786	}
2787	pagebits = vm_page_bits(base, size & (DEV_BSIZE - 1));
2788#endif
2789	if (base == 0 && size == PAGE_SIZE) {
2790		/*
2791		 * The page can only be modified within the pmap if it is
2792		 * mapped, and it can only be mapped if it was previously
2793		 * fully valid.
2794		 */
2795		if (oldvalid == VM_PAGE_BITS_ALL)
2796			/*
2797			 * Perform the pmap_clear_modify() first.  Otherwise,
2798			 * a concurrent pmap operation, such as
2799			 * pmap_protect(), could clear a modification in the
2800			 * pmap and set the dirty field on the page before
2801			 * pmap_clear_modify() had begun and after the dirty
2802			 * field was cleared here.
2803			 */
2804			pmap_clear_modify(m);
2805		m->dirty = 0;
2806		m->oflags &= ~VPO_NOSYNC;
2807	} else if (oldvalid != VM_PAGE_BITS_ALL)
2808		m->dirty &= ~pagebits;
2809	else
2810		vm_page_clear_dirty_mask(m, pagebits);
2811}
2812
2813void
2814vm_page_clear_dirty(vm_page_t m, int base, int size)
2815{
2816
2817	vm_page_clear_dirty_mask(m, vm_page_bits(base, size));
2818}
2819
2820/*
2821 *	vm_page_set_invalid:
2822 *
2823 *	Invalidates DEV_BSIZE'd chunks within a page.  Both the
2824 *	valid and dirty bits for the affected areas are cleared.
2825 */
2826void
2827vm_page_set_invalid(vm_page_t m, int base, int size)
2828{
2829	vm_page_bits_t bits;
2830
2831	VM_OBJECT_ASSERT_WLOCKED(m->object);
2832	KASSERT((m->oflags & VPO_BUSY) == 0,
2833	    ("vm_page_set_invalid: page %p is busy", m));
2834	bits = vm_page_bits(base, size);
2835	if (m->valid == VM_PAGE_BITS_ALL && bits != 0)
2836		pmap_remove_all(m);
2837	KASSERT(!pmap_page_is_mapped(m),
2838	    ("vm_page_set_invalid: page %p is mapped", m));
2839	m->valid &= ~bits;
2840	m->dirty &= ~bits;
2841}
2842
2843/*
2844 * vm_page_zero_invalid()
2845 *
2846 *	The kernel assumes that the invalid portions of a page contain
2847 *	garbage, but such pages can be mapped into memory by user code.
2848 *	When this occurs, we must zero out the non-valid portions of the
2849 *	page so user code sees what it expects.
2850 *
2851 *	Pages are most often semi-valid when the end of a file is mapped
2852 *	into memory and the file's size is not page aligned.
2853 */
2854void
2855vm_page_zero_invalid(vm_page_t m, boolean_t setvalid)
2856{
2857	int b;
2858	int i;
2859
2860	VM_OBJECT_ASSERT_WLOCKED(m->object);
2861	/*
2862	 * Scan the valid bits looking for invalid sections that
2863	 * must be zeroed.  Invalid sub-DEV_BSIZE'd areas (where the
2864	 * valid bit may be set) have already been zeroed by
2865	 * vm_page_set_validclean().
2866	 */
2867	for (b = i = 0; i <= PAGE_SIZE / DEV_BSIZE; ++i) {
2868		if (i == (PAGE_SIZE / DEV_BSIZE) ||
2869		    (m->valid & ((vm_page_bits_t)1 << i))) {
2870			if (i > b) {
2871				pmap_zero_page_area(m,
2872				    b << DEV_BSHIFT, (i - b) << DEV_BSHIFT);
2873			}
2874			b = i + 1;
2875		}
2876	}
2877
2878	/*
2879	 * setvalid is TRUE when we can safely set the zero'd areas
2880	 * as being valid.  We can do this if there are no cache consistency
2881	 * issues.  E.g., it is ok to do with UFS, but not ok to do with NFS.
2882	 */
2883	if (setvalid)
2884		m->valid = VM_PAGE_BITS_ALL;
2885}
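
/*
 * Editor's worked example, not part of the original file: with PAGE_SIZE 4096
 * and DEV_BSIZE 512 there are eight valid bits.  If m->valid == 0x07, the
 * loop above leaves b == 3 until the terminating iteration (i == 8), where it
 * zeroes the invalid run with pmap_zero_page_area(m, 3 << 9, 5 << 9), i.e.
 * bytes [1536, 4096).
 */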
2886
2887/*
2888 *	vm_page_is_valid:
2889 *
2890 *	Is (partial) page valid?  Note that the case where size == 0
2891 *	will return FALSE in the degenerate case where the page is
2892 *	entirely invalid, and TRUE otherwise.
2893 */
2894int
2895vm_page_is_valid(vm_page_t m, int base, int size)
2896{
2897	vm_page_bits_t bits;
2898
2899	VM_OBJECT_ASSERT_WLOCKED(m->object);
2900	bits = vm_page_bits(base, size);
2901	if (m->valid && ((m->valid & bits) == bits))
2902		return (1);
2903	else
2904		return (0);
2905}
2906
2907/*
2908 * Set the page's dirty bits if the page is modified.
2909 */
2910void
2911vm_page_test_dirty(vm_page_t m)
2912{
2913
2914	VM_OBJECT_ASSERT_WLOCKED(m->object);
2915	if (m->dirty != VM_PAGE_BITS_ALL && pmap_is_modified(m))
2916		vm_page_dirty(m);
2917}
2918
2919void
2920vm_page_lock_KBI(vm_page_t m, const char *file, int line)
2921{
2922
2923	mtx_lock_flags_(vm_page_lockptr(m), 0, file, line);
2924}
2925
2926void
2927vm_page_unlock_KBI(vm_page_t m, const char *file, int line)
2928{
2929
2930	mtx_unlock_flags_(vm_page_lockptr(m), 0, file, line);
2931}
2932
2933int
2934vm_page_trylock_KBI(vm_page_t m, const char *file, int line)
2935{
2936
2937	return (mtx_trylock_flags_(vm_page_lockptr(m), 0, file, line));
2938}
2939
2940#if defined(INVARIANTS) || defined(INVARIANT_SUPPORT)
2941void
2942vm_page_lock_assert_KBI(vm_page_t m, int a, const char *file, int line)
2943{
2944
2945	mtx_assert_(vm_page_lockptr(m), a, file, line);
2946}
2947#endif
2948
2949int so_zerocp_fullpage = 0;
2950
2951/*
2952 *	Replace the given page with a copy.  The copied page assumes
2953 *	the portion of the given page's "wire_count" that is not the
2954 *	responsibility of this copy-on-write mechanism.
2955 *
2956 *	The object containing the given page must have a non-zero
2957 *	paging-in-progress count and be locked.
2958 */
2959void
2960vm_page_cowfault(vm_page_t m)
2961{
2962	vm_page_t mnew;
2963	vm_object_t object;
2964	vm_pindex_t pindex;
2965
2966	vm_page_lock_assert(m, MA_OWNED);
2967	object = m->object;
2968	VM_OBJECT_ASSERT_WLOCKED(object);
2969	KASSERT(object->paging_in_progress != 0,
2970	    ("vm_page_cowfault: object %p's paging-in-progress count is zero.",
2971	    object));
2972	pindex = m->pindex;
2973
2974 retry_alloc:
2975	pmap_remove_all(m);
2976	vm_page_remove(m);
2977	mnew = vm_page_alloc(object, pindex, VM_ALLOC_NORMAL | VM_ALLOC_NOBUSY);
2978	if (mnew == NULL) {
2979		vm_page_insert(m, object, pindex);
2980		vm_page_unlock(m);
2981		VM_OBJECT_WUNLOCK(object);
2982		VM_WAIT;
2983		VM_OBJECT_WLOCK(object);
2984		if (m == vm_page_lookup(object, pindex)) {
2985			vm_page_lock(m);
2986			goto retry_alloc;
2987		} else {
2988			/*
2989			 * Page disappeared during the wait.
2990			 */
2991			return;
2992		}
2993	}
2994
2995	if (m->cow == 0) {
2996		/*
2997		 * check to see if we raced with an xmit complete when
2998		 * waiting to allocate a page.  If so, put things back
2999		 * the way they were
3000		 */
3001		vm_page_unlock(m);
3002		vm_page_lock(mnew);
3003		vm_page_free(mnew);
3004		vm_page_unlock(mnew);
3005		vm_page_insert(m, object, pindex);
3006	} else { /* clear COW & copy page */
3007		if (!so_zerocp_fullpage)
3008			pmap_copy_page(m, mnew);
3009		mnew->valid = VM_PAGE_BITS_ALL;
3010		vm_page_dirty(mnew);
3011		mnew->wire_count = m->wire_count - m->cow;
3012		m->wire_count = m->cow;
3013		vm_page_unlock(m);
3014	}
3015}
3016
3017void
3018vm_page_cowclear(vm_page_t m)
3019{
3020
3021	vm_page_lock_assert(m, MA_OWNED);
3022	if (m->cow) {
3023		m->cow--;
3024		/*
3025		 * Let vm_fault add back write permission lazily.
3026		 */
3027	}
3028	/*
3029	 *  sf_buf_free() will free the page, so we needn't do it here
3030	 */
3031}
3032
3033int
3034vm_page_cowsetup(vm_page_t m)
3035{
3036
3037	vm_page_lock_assert(m, MA_OWNED);
3038	if ((m->flags & PG_FICTITIOUS) != 0 ||
3039	    (m->oflags & VPO_UNMANAGED) != 0 ||
3040	    m->cow == USHRT_MAX - 1 || !VM_OBJECT_TRYWLOCK(m->object))
3041		return (EBUSY);
3042	m->cow++;
3043	pmap_remove_write(m);
3044	VM_OBJECT_WUNLOCK(m->object);
3045	return (0);
3046}
3047
3048#ifdef INVARIANTS
3049void
3050vm_page_object_lock_assert(vm_page_t m)
3051{
3052
3053	/*
3054	 * Certain of the page's fields may only be modified by the
3055	 * holder of the containing object's lock or the setter of the
3056	 * page's VPO_BUSY flag.  Unfortunately, the setter of the
3057	 * VPO_BUSY flag is not recorded, and thus cannot be checked
3058	 * here.
3059	 */
3060	if (m->object != NULL && (m->oflags & VPO_BUSY) == 0)
3061		VM_OBJECT_ASSERT_WLOCKED(m->object);
3062}
3063#endif
3064
3065#include "opt_ddb.h"
3066#ifdef DDB
3067#include <sys/kernel.h>
3068
3069#include <ddb/ddb.h>
3070
3071DB_SHOW_COMMAND(page, vm_page_print_page_info)
3072{
3073	db_printf("cnt.v_free_count: %d\n", cnt.v_free_count);
3074	db_printf("cnt.v_cache_count: %d\n", cnt.v_cache_count);
3075	db_printf("cnt.v_inactive_count: %d\n", cnt.v_inactive_count);
3076	db_printf("cnt.v_active_count: %d\n", cnt.v_active_count);
3077	db_printf("cnt.v_wire_count: %d\n", cnt.v_wire_count);
3078	db_printf("cnt.v_free_reserved: %d\n", cnt.v_free_reserved);
3079	db_printf("cnt.v_free_min: %d\n", cnt.v_free_min);
3080	db_printf("cnt.v_free_target: %d\n", cnt.v_free_target);
3081	db_printf("cnt.v_cache_min: %d\n", cnt.v_cache_min);
3082	db_printf("cnt.v_inactive_target: %d\n", cnt.v_inactive_target);
3083}
3084
3085DB_SHOW_COMMAND(pageq, vm_page_print_pageq_info)
3086{
3087
3088	db_printf("PQ_FREE:");
3089	db_printf(" %d", cnt.v_free_count);
3090	db_printf("\n");
3091
3092	db_printf("PQ_CACHE:");
3093	db_printf(" %d", cnt.v_cache_count);
3094	db_printf("\n");
3095
3096	db_printf("PQ_ACTIVE: %d, PQ_INACTIVE: %d\n",
3097		*vm_pagequeues[PQ_ACTIVE].pq_cnt,
3098		*vm_pagequeues[PQ_INACTIVE].pq_cnt);
3099}
3100#endif /* DDB */
3101