vm_page.c revision 241512
/*-
 * Copyright (c) 1991 Regents of the University of California.
 * All rights reserved.
 * Copyright (c) 1998 Matthew Dillon.  All Rights Reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * The Mach Operating System project at Carnegie-Mellon University.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	from: @(#)vm_page.c	7.4 (Berkeley) 5/7/91
 */

/*-
 * Copyright (c) 1987, 1990 Carnegie-Mellon University.
 * All rights reserved.
 *
 * Authors: Avadis Tevanian, Jr., Michael Wayne Young
 *
 * Permission to use, copy, modify and distribute this software and
 * its documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND
 * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie the
 * rights to redistribute these changes.
 */

/*
 *			GENERAL RULES ON VM_PAGE MANIPULATION
 *
 *	- a pageq mutex is required when adding or removing a page from a
 *	  page queue (vm_page_queue[]), regardless of other mutexes or the
 *	  busy state of a page.
 *
 *	- The object mutex is held when inserting or removing
 *	  pages from an object (vm_page_insert() or vm_page_remove()).
 *
 */

/*
 *	Resident memory management module.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD: head/sys/vm/vm_page.c 241512 2012-10-13 18:46:46Z alc $");

#include "opt_vm.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/lock.h>
#include <sys/kernel.h>
#include <sys/limits.h>
#include <sys/malloc.h>
#include <sys/msgbuf.h>
#include <sys/mutex.h>
#include <sys/proc.h>
#include <sys/sysctl.h>
#include <sys/vmmeter.h>
#include <sys/vnode.h>

#include <vm/vm.h>
#include <vm/pmap.h>
#include <vm/vm_param.h>
#include <vm/vm_kern.h>
#include <vm/vm_object.h>
#include <vm/vm_page.h>
#include <vm/vm_pageout.h>
#include <vm/vm_pager.h>
#include <vm/vm_phys.h>
#include <vm/vm_reserv.h>
#include <vm/vm_extern.h>
#include <vm/uma.h>
#include <vm/uma_int.h>

#include <machine/md_var.h>

/*
 *	Associated with each page of user-allocatable memory is a
 *	page structure.
 */

struct vpgqueues vm_page_queues[PQ_COUNT];
struct vpglocks vm_page_queue_lock;
struct vpglocks vm_page_queue_free_lock;

struct vpglocks	pa_lock[PA_LOCK_COUNT];

vm_page_t vm_page_array;
long vm_page_array_size;
long first_page;
int vm_page_zero_count;

static int boot_pages = UMA_BOOT_PAGES;
TUNABLE_INT("vm.boot_pages", &boot_pages);
SYSCTL_INT(_vm, OID_AUTO, boot_pages, CTLFLAG_RD, &boot_pages, 0,
	"number of pages allocated for bootstrapping the VM system");

static int pa_tryrelock_restart;
SYSCTL_INT(_vm, OID_AUTO, tryrelock_restart, CTLFLAG_RD,
    &pa_tryrelock_restart, 0, "Number of tryrelock restarts");

static uma_zone_t fakepg_zone;

static struct vnode *vm_page_alloc_init(vm_page_t m);
static void vm_page_clear_dirty_mask(vm_page_t m, vm_page_bits_t pagebits);
static void vm_page_queue_remove(int queue, vm_page_t m);
static void vm_page_enqueue(int queue, vm_page_t m);
static void vm_page_init_fakepg(void *dummy);

SYSINIT(vm_page, SI_SUB_VM, SI_ORDER_SECOND, vm_page_init_fakepg, NULL);

static void
vm_page_init_fakepg(void *dummy)
{

	fakepg_zone = uma_zcreate("fakepg", sizeof(struct vm_page), NULL, NULL,
	    NULL, NULL, UMA_ALIGN_PTR, UMA_ZONE_NOFREE | UMA_ZONE_VM);
}

/* Make sure that u_long is at least 64 bits when PAGE_SIZE is 32K. */
#if PAGE_SIZE == 32768
#ifdef CTASSERT
CTASSERT(sizeof(u_long) >= 8);
#endif
#endif

/*
 * Try to acquire a physical address lock while a pmap is locked.  If we
 * fail to trylock we unlock and lock the pmap directly and cache the
 * locked pa in *locked.  The caller should then restart their loop in case
 * the virtual to physical mapping has changed.
 */
int
vm_page_pa_tryrelock(pmap_t pmap, vm_paddr_t pa, vm_paddr_t *locked)
{
	vm_paddr_t lockpa;

	lockpa = *locked;
	*locked = pa;
	if (lockpa) {
		PA_LOCK_ASSERT(lockpa, MA_OWNED);
		if (PA_LOCKPTR(pa) == PA_LOCKPTR(lockpa))
			return (0);
		PA_UNLOCK(lockpa);
	}
	if (PA_TRYLOCK(pa))
		return (0);
	PMAP_UNLOCK(pmap);
	atomic_add_int(&pa_tryrelock_restart, 1);
	PA_LOCK(pa);
	PMAP_LOCK(pmap);
	return (EAGAIN);
}

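/*
 * Illustrative sketch (not part of the original file): the retry loop a
 * pmap-layer caller of vm_page_pa_tryrelock() is expected to use.  The
 * variables "pmap", "va", and "pa" are hypothetical locals, and the PTE
 * read is deliberately elided.
 */
#if 0
	vm_paddr_t locked_pa = 0;

	PMAP_LOCK(pmap);
retry:
	pa = ...;	/* read the physical address from the PTE for "va" */
	if (vm_page_pa_tryrelock(pmap, pa, &locked_pa))
		goto retry;	/* the VA->PA mapping may have changed */
	/* ... operate on the page with both the pa and pmap locks held ... */
	PA_UNLOCK_COND(locked_pa);
	PMAP_UNLOCK(pmap);
#endif
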
/*
 *	vm_set_page_size:
 *
 *	Sets the page size, perhaps based upon the memory
 *	size.  Must be called before any use of page-size
 *	dependent functions.
 */
void
vm_set_page_size(void)
{
	if (cnt.v_page_size == 0)
		cnt.v_page_size = PAGE_SIZE;
	if (((cnt.v_page_size - 1) & cnt.v_page_size) != 0)
		panic("vm_set_page_size: page size not a power of two");
}

/*
 *	vm_page_blacklist_lookup:
 *
 *	See if a physical address in this page has been listed
 *	in the blacklist tunable.  Entries in the tunable are
 *	separated by spaces or commas.  If an invalid integer is
 *	encountered then the rest of the string is skipped.
 */
static int
vm_page_blacklist_lookup(char *list, vm_paddr_t pa)
{
	vm_paddr_t bad;
	char *cp, *pos;

	for (pos = list; *pos != '\0'; pos = cp) {
		bad = strtoq(pos, &cp, 0);
		if (*cp != '\0') {
			if (*cp == ' ' || *cp == ',') {
				cp++;
				if (cp == pos)
					continue;
			} else
				break;
		}
		if (pa == trunc_page(bad))
			return (1);
	}
	return (0);
}

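/*
 * For example, known-bad frames can be excluded at boot from loader.conf(5)
 * through the "vm.blacklist" tunable; the addresses below are, of course,
 * hypothetical:
 *
 *	vm.blacklist="0x7f3ba000 0x9d21c000"
 */
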
/*
 *	vm_page_startup:
 *
 *	Initializes the resident memory module.
 *
 *	Allocates memory for the page cells, and
 *	for the object/offset-to-page hash table headers.
 *	Each page cell is initialized and placed on the free list.
 */
vm_offset_t
vm_page_startup(vm_offset_t vaddr)
{
	vm_offset_t mapped;
	vm_paddr_t page_range;
	vm_paddr_t new_end;
	int i;
	vm_paddr_t pa;
	vm_paddr_t last_pa;
	char *list;

	/* the biggest memory array is the second group of pages */
	vm_paddr_t end;
	vm_paddr_t biggestsize;
	vm_paddr_t low_water, high_water;
	int biggestone;

	biggestsize = 0;
	biggestone = 0;
	vaddr = round_page(vaddr);

	for (i = 0; phys_avail[i + 1]; i += 2) {
		phys_avail[i] = round_page(phys_avail[i]);
		phys_avail[i + 1] = trunc_page(phys_avail[i + 1]);
	}

	low_water = phys_avail[0];
	high_water = phys_avail[1];

	for (i = 0; phys_avail[i + 1]; i += 2) {
		vm_paddr_t size = phys_avail[i + 1] - phys_avail[i];

		if (size > biggestsize) {
			biggestone = i;
			biggestsize = size;
		}
		if (phys_avail[i] < low_water)
			low_water = phys_avail[i];
		if (phys_avail[i + 1] > high_water)
			high_water = phys_avail[i + 1];
	}

#ifdef XEN
	low_water = 0;
#endif

	end = phys_avail[biggestone+1];

	/*
	 * Initialize the page and queue locks.
	 */
	mtx_init(&vm_page_queue_mtx, "vm page queue", NULL, MTX_DEF |
	    MTX_RECURSE);
	mtx_init(&vm_page_queue_free_mtx, "vm page free queue", NULL, MTX_DEF);
	for (i = 0; i < PA_LOCK_COUNT; i++)
		mtx_init(&pa_lock[i].data, "vm page", NULL, MTX_DEF);

	/*
	 * Initialize the queue headers for the hold queue, the active queue,
	 * and the inactive queue.
	 */
	for (i = 0; i < PQ_COUNT; i++)
		TAILQ_INIT(&vm_page_queues[i].pl);
	vm_page_queues[PQ_INACTIVE].cnt = &cnt.v_inactive_count;
	vm_page_queues[PQ_ACTIVE].cnt = &cnt.v_active_count;
	vm_page_queues[PQ_HOLD].cnt = &cnt.v_active_count;

	/*
	 * Allocate memory for use when boot strapping the kernel memory
	 * allocator.
	 */
	new_end = end - (boot_pages * UMA_SLAB_SIZE);
	new_end = trunc_page(new_end);
	mapped = pmap_map(&vaddr, new_end, end,
	    VM_PROT_READ | VM_PROT_WRITE);
	bzero((void *)mapped, end - new_end);
	uma_startup((void *)mapped, boot_pages);

#if defined(__amd64__) || defined(__i386__) || defined(__arm__) || \
    defined(__mips__)
	/*
	 * Allocate a bitmap to indicate that a random physical page
	 * needs to be included in a minidump.
	 *
	 * The amd64 port needs this to indicate which direct map pages
	 * need to be dumped, via calls to dump_add_page()/dump_drop_page().
	 *
	 * However, i386 still needs this workspace internally within the
	 * minidump code.  In theory, they are not needed on i386, but are
	 * included should the sf_buf code decide to use them.
	 */
	last_pa = 0;
	for (i = 0; dump_avail[i + 1] != 0; i += 2)
		if (dump_avail[i + 1] > last_pa)
			last_pa = dump_avail[i + 1];
	page_range = last_pa / PAGE_SIZE;
	vm_page_dump_size = round_page(roundup2(page_range, NBBY) / NBBY);
	new_end -= vm_page_dump_size;
	vm_page_dump = (void *)(uintptr_t)pmap_map(&vaddr, new_end,
	    new_end + vm_page_dump_size, VM_PROT_READ | VM_PROT_WRITE);
	bzero((void *)vm_page_dump, vm_page_dump_size);
#endif
#ifdef __amd64__
	/*
	 * Request that the physical pages underlying the message buffer be
	 * included in a crash dump.  Since the message buffer is accessed
	 * through the direct map, they are not automatically included.
	 */
	pa = DMAP_TO_PHYS((vm_offset_t)msgbufp->msg_ptr);
	last_pa = pa + round_page(msgbufsize);
	while (pa < last_pa) {
		dump_add_page(pa);
		pa += PAGE_SIZE;
	}
#endif
	/*
	 * Compute the number of pages of memory that will be available for
	 * use (taking into account the overhead of a page structure per
	 * page).
	 */
	first_page = low_water / PAGE_SIZE;
#ifdef VM_PHYSSEG_SPARSE
	page_range = 0;
	for (i = 0; phys_avail[i + 1] != 0; i += 2)
		page_range += atop(phys_avail[i + 1] - phys_avail[i]);
#elif defined(VM_PHYSSEG_DENSE)
	page_range = high_water / PAGE_SIZE - first_page;
#else
#error "Either VM_PHYSSEG_DENSE or VM_PHYSSEG_SPARSE must be defined."
#endif
	end = new_end;

	/*
	 * Reserve an unmapped guard page to trap access to vm_page_array[-1].
	 */
	vaddr += PAGE_SIZE;

	/*
	 * Initialize the mem entry structures now, and put them in the free
	 * queue.
	 */
	new_end = trunc_page(end - page_range * sizeof(struct vm_page));
	mapped = pmap_map(&vaddr, new_end, end,
	    VM_PROT_READ | VM_PROT_WRITE);
	vm_page_array = (vm_page_t) mapped;
#if VM_NRESERVLEVEL > 0
	/*
	 * Allocate memory for the reservation management system's data
	 * structures.
	 */
	new_end = vm_reserv_startup(&vaddr, new_end, high_water);
#endif
#if defined(__amd64__) || defined(__mips__)
	/*
	 * pmap_map on amd64 and mips can come out of the direct-map, not kvm
	 * like i386, so the pages must be tracked for a crashdump to include
	 * this data.  This includes the vm_page_array and the early UMA
	 * bootstrap pages.
	 */
	for (pa = new_end; pa < phys_avail[biggestone + 1]; pa += PAGE_SIZE)
		dump_add_page(pa);
#endif
	phys_avail[biggestone + 1] = new_end;

	/*
	 * Clear all of the page structures
	 */
	bzero((caddr_t) vm_page_array, page_range * sizeof(struct vm_page));
	for (i = 0; i < page_range; i++)
		vm_page_array[i].order = VM_NFREEORDER;
	vm_page_array_size = page_range;

	/*
	 * Initialize the physical memory allocator.
	 */
	vm_phys_init();

	/*
	 * Add every available physical page that is not blacklisted to
	 * the free lists.
	 */
	cnt.v_page_count = 0;
	cnt.v_free_count = 0;
	list = getenv("vm.blacklist");
	for (i = 0; phys_avail[i + 1] != 0; i += 2) {
		pa = phys_avail[i];
		last_pa = phys_avail[i + 1];
		while (pa < last_pa) {
			if (list != NULL &&
			    vm_page_blacklist_lookup(list, pa))
				printf("Skipping page with pa 0x%jx\n",
				    (uintmax_t)pa);
			else
				vm_phys_add_page(pa);
			pa += PAGE_SIZE;
		}
	}
	freeenv(list);
#if VM_NRESERVLEVEL > 0
	/*
	 * Initialize the reservation management system.
	 */
	vm_reserv_init();
#endif
	return (vaddr);
}

void
vm_page_reference(vm_page_t m)
{

	vm_page_aflag_set(m, PGA_REFERENCED);
}

void
vm_page_busy(vm_page_t m)
{

	VM_OBJECT_LOCK_ASSERT(m->object, MA_OWNED);
	KASSERT((m->oflags & VPO_BUSY) == 0,
	    ("vm_page_busy: page already busy!!!"));
	m->oflags |= VPO_BUSY;
}

/*
 *      vm_page_flash:
 *
 *      wakeup anyone waiting for the page.
 */
void
vm_page_flash(vm_page_t m)
{

	VM_OBJECT_LOCK_ASSERT(m->object, MA_OWNED);
	if (m->oflags & VPO_WANTED) {
		m->oflags &= ~VPO_WANTED;
		wakeup(m);
	}
}

/*
 *      vm_page_wakeup:
 *
 *      clear the VPO_BUSY flag and wakeup anyone waiting for the
 *      page.
 *
 */
void
vm_page_wakeup(vm_page_t m)
{

	VM_OBJECT_LOCK_ASSERT(m->object, MA_OWNED);
	KASSERT(m->oflags & VPO_BUSY, ("vm_page_wakeup: page not busy!!!"));
	m->oflags &= ~VPO_BUSY;
	vm_page_flash(m);
}

void
vm_page_io_start(vm_page_t m)
{

	VM_OBJECT_LOCK_ASSERT(m->object, MA_OWNED);
	m->busy++;
}

void
vm_page_io_finish(vm_page_t m)
{

	VM_OBJECT_LOCK_ASSERT(m->object, MA_OWNED);
	KASSERT(m->busy > 0, ("vm_page_io_finish: page %p is not busy", m));
	m->busy--;
	if (m->busy == 0)
		vm_page_flash(m);
}

/*
 * Keep the page from being freed by the page daemon.  This has much the
 * same effect as wiring, except at much lower overhead, and should be
 * used only for *very* temporary holding ("wiring").
 */
void
vm_page_hold(vm_page_t mem)
{

	vm_page_lock_assert(mem, MA_OWNED);
	mem->hold_count++;
}

void
vm_page_unhold(vm_page_t mem)
{

	vm_page_lock_assert(mem, MA_OWNED);
	--mem->hold_count;
	KASSERT(mem->hold_count >= 0, ("vm_page_unhold: hold count < 0!!!"));
	if (mem->hold_count == 0 && mem->queue == PQ_HOLD)
		vm_page_free_toq(mem);
}

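/*
 * Illustrative sketch (not part of the original file): holding a managed
 * page "m" across a brief operation during which it must not be freed.
 */
#if 0
	vm_page_lock(m);
	vm_page_hold(m);
	vm_page_unlock(m);
	/* ... short operation; "m" cannot be freed while held ... */
	vm_page_lock(m);
	vm_page_unhold(m);
	vm_page_unlock(m);
#endif
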
/*
 *	vm_page_unhold_pages:
 *
 *	Unhold each of the pages that is referenced by the given array.
 */
void
vm_page_unhold_pages(vm_page_t *ma, int count)
{
	struct mtx *mtx, *new_mtx;

	mtx = NULL;
	for (; count != 0; count--) {
		/*
		 * Avoid releasing and reacquiring the same page lock.
		 */
		new_mtx = vm_page_lockptr(*ma);
		if (mtx != new_mtx) {
			if (mtx != NULL)
				mtx_unlock(mtx);
			mtx = new_mtx;
			mtx_lock(mtx);
		}
		vm_page_unhold(*ma);
		ma++;
	}
	if (mtx != NULL)
		mtx_unlock(mtx);
}

vm_page_t
PHYS_TO_VM_PAGE(vm_paddr_t pa)
{
	vm_page_t m;

#ifdef VM_PHYSSEG_SPARSE
	m = vm_phys_paddr_to_vm_page(pa);
	if (m == NULL)
		m = vm_phys_fictitious_to_vm_page(pa);
	return (m);
#elif defined(VM_PHYSSEG_DENSE)
	long pi;

	pi = atop(pa);
	if (pi >= first_page && (pi - first_page) < vm_page_array_size) {
		m = &vm_page_array[pi - first_page];
		return (m);
	}
	return (vm_phys_fictitious_to_vm_page(pa));
#else
#error "Either VM_PHYSSEG_DENSE or VM_PHYSSEG_SPARSE must be defined."
#endif
}

/*
 *	vm_page_getfake:
 *
 *	Create a fictitious page with the specified physical address and
 *	memory attribute.  The memory attribute is the only machine-
 *	dependent aspect of a fictitious page that must be initialized.
 */
vm_page_t
vm_page_getfake(vm_paddr_t paddr, vm_memattr_t memattr)
{
	vm_page_t m;

	m = uma_zalloc(fakepg_zone, M_WAITOK | M_ZERO);
	vm_page_initfake(m, paddr, memattr);
	return (m);
}

void
vm_page_initfake(vm_page_t m, vm_paddr_t paddr, vm_memattr_t memattr)
{

	if ((m->flags & PG_FICTITIOUS) != 0) {
		/*
		 * The page's memattr might have changed since the
		 * previous initialization.  Update the pmap to the
		 * new memattr.
		 */
		goto memattr;
	}
	m->phys_addr = paddr;
	m->queue = PQ_NONE;
	/* Fictitious pages don't use "segind". */
	m->flags = PG_FICTITIOUS;
	/* Fictitious pages don't use "order" or "pool". */
	m->oflags = VPO_BUSY | VPO_UNMANAGED;
	m->wire_count = 1;
memattr:
	pmap_page_set_memattr(m, memattr);
}

/*
 *	vm_page_putfake:
 *
 *	Release a fictitious page.
 */
void
vm_page_putfake(vm_page_t m)
{

	KASSERT((m->oflags & VPO_UNMANAGED) != 0, ("managed %p", m));
	KASSERT((m->flags & PG_FICTITIOUS) != 0,
	    ("vm_page_putfake: bad page %p", m));
	uma_zfree(fakepg_zone, m);
}

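/*
 * Illustrative sketch (not part of the original file): a device-pager
 * style consumer wrapping a hypothetical physical address in a
 * fictitious page.  The address and memory attribute are made up.
 */
#if 0
	vm_page_t fp;

	fp = vm_page_getfake(0xd0000000, VM_MEMATTR_UNCACHEABLE);
	/* ... hand "fp" to the VM system, e.g. from an OBJT_DEVICE pager ... */
	vm_page_putfake(fp);
#endif
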
/*
 *	vm_page_updatefake:
 *
 *	Update the given fictitious page to the specified physical address and
 *	memory attribute.
 */
void
vm_page_updatefake(vm_page_t m, vm_paddr_t paddr, vm_memattr_t memattr)
{

	KASSERT((m->flags & PG_FICTITIOUS) != 0,
	    ("vm_page_updatefake: bad page %p", m));
	m->phys_addr = paddr;
	pmap_page_set_memattr(m, memattr);
}

/*
 *	vm_page_free:
 *
 *	Free a page.
 */
void
vm_page_free(vm_page_t m)
{

	m->flags &= ~PG_ZERO;
	vm_page_free_toq(m);
}

/*
 *	vm_page_free_zero:
 *
 *	Free a page to the zeroed-pages queue.
 */
void
vm_page_free_zero(vm_page_t m)
{

	m->flags |= PG_ZERO;
	vm_page_free_toq(m);
}

/*
 * Unbusy and handle the page queueing for a page from the VOP_GETPAGES()
 * array which is not the requested page.
 */
void
vm_page_readahead_finish(vm_page_t m)
{

	if (m->valid != 0) {
		/*
		 * Since the page is not the requested page, whether
		 * it should be activated or deactivated is not
		 * obvious.  Empirical results have shown that
		 * deactivating the page is usually the best choice,
		 * unless the page is wanted by another thread.
		 */
		if (m->oflags & VPO_WANTED) {
			vm_page_lock(m);
			vm_page_activate(m);
			vm_page_unlock(m);
		} else {
			vm_page_lock(m);
			vm_page_deactivate(m);
			vm_page_unlock(m);
		}
		vm_page_wakeup(m);
	} else {
		/*
		 * Free the completely invalid page.  Such a page state
		 * occurs due to a short read operation that did not
		 * cover our page at all, or due to a read error.
		 */
		vm_page_lock(m);
		vm_page_free(m);
		vm_page_unlock(m);
	}
}

/*
 *	vm_page_sleep:
 *
 *	Sleep and release the page lock.
 *
 *	The object containing the given page must be locked.
 */
void
vm_page_sleep(vm_page_t m, const char *msg)
{

	VM_OBJECT_LOCK_ASSERT(m->object, MA_OWNED);
	if (mtx_owned(vm_page_lockptr(m)))
		vm_page_unlock(m);

	/*
	 * It's possible that while we sleep, the page will get
	 * unbusied and freed.  If we are holding the object
	 * lock, we will assume we hold a reference to the object
	 * such that even if m->object changes, we can re-lock
	 * it.
	 */
	m->oflags |= VPO_WANTED;
	msleep(m, VM_OBJECT_MTX(m->object), PVM, msg, 0);
}

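/*
 * Illustrative sketch (not part of the original file): the usual "sleep
 * and re-lookup" loop built on vm_page_sleep().  Because msleep() drops
 * and reacquires the object lock, the page must be looked up again after
 * waking; "object", "pindex", and the wmesg string are hypothetical.
 */
#if 0
	VM_OBJECT_LOCK(object);
	while ((m = vm_page_lookup(object, pindex)) != NULL &&
	    (m->oflags & VPO_BUSY) != 0)
		vm_page_sleep(m, "pgbusy");
	/* ... "m" is now NULL or no longer busied by another thread ... */
	VM_OBJECT_UNLOCK(object);
#endif
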
/*
 *	vm_page_dirty_KBI:		[ internal use only ]
 *
 *	Set all bits in the page's dirty field.
 *
 *	The object containing the specified page must be locked if the
 *	call is made from the machine-independent layer.
 *
 *	See vm_page_clear_dirty_mask().
 *
 *	This function should only be called by vm_page_dirty().
 */
void
vm_page_dirty_KBI(vm_page_t m)
{

	/* These assertions refer to this operation by its public name. */
	KASSERT((m->flags & PG_CACHED) == 0,
	    ("vm_page_dirty: page in cache!"));
	KASSERT(!VM_PAGE_IS_FREE(m),
	    ("vm_page_dirty: page is free!"));
	KASSERT(m->valid == VM_PAGE_BITS_ALL,
	    ("vm_page_dirty: page is invalid!"));
	m->dirty = VM_PAGE_BITS_ALL;
}

/*
 *	vm_page_splay:
 *
 *	Implements Sleator and Tarjan's top-down splay algorithm.  Returns
 *	the vm_page containing the given pindex.  If, however, that
 *	pindex is not found in the vm_object, returns a vm_page that is
 *	adjacent to the pindex, coming before or after it.
 */
vm_page_t
vm_page_splay(vm_pindex_t pindex, vm_page_t root)
{
	struct vm_page dummy;
	vm_page_t lefttreemax, righttreemin, y;

	if (root == NULL)
		return (root);
	lefttreemax = righttreemin = &dummy;
	for (;; root = y) {
		if (pindex < root->pindex) {
			if ((y = root->left) == NULL)
				break;
			if (pindex < y->pindex) {
				/* Rotate right. */
				root->left = y->right;
				y->right = root;
				root = y;
				if ((y = root->left) == NULL)
					break;
			}
			/* Link into the new root's right tree. */
			righttreemin->left = root;
			righttreemin = root;
		} else if (pindex > root->pindex) {
			if ((y = root->right) == NULL)
				break;
			if (pindex > y->pindex) {
				/* Rotate left. */
				root->right = y->left;
				y->left = root;
				root = y;
				if ((y = root->right) == NULL)
					break;
			}
			/* Link into the new root's left tree. */
			lefttreemax->right = root;
			lefttreemax = root;
		} else
			break;
	}
	/* Assemble the new root. */
	lefttreemax->right = root->left;
	righttreemin->left = root->right;
	root->left = dummy.right;
	root->right = dummy.left;
	return (root);
}

/*
 *	vm_page_insert:		[ internal use only ]
 *
 *	Inserts the given mem entry into the object and object list.
 *
 *	The page tables are not updated; the page will presumably be faulted
 *	in if necessary, or, for a kernel page, the caller will at some point
 *	enter the page into the kernel's pmap.  We are not allowed to sleep
 *	here, so we *can't* do this anyway.
 *
 *	The object must be locked.
 */
void
vm_page_insert(vm_page_t m, vm_object_t object, vm_pindex_t pindex)
{
	vm_page_t root;

	VM_OBJECT_LOCK_ASSERT(object, MA_OWNED);
	if (m->object != NULL)
		panic("vm_page_insert: page already inserted");

	/*
	 * Record the object/offset pair in this page
	 */
	m->object = object;
	m->pindex = pindex;

	/*
	 * Now link into the object's ordered list of backed pages.
	 */
	root = object->root;
	if (root == NULL) {
		m->left = NULL;
		m->right = NULL;
		TAILQ_INSERT_TAIL(&object->memq, m, listq);
	} else {
		root = vm_page_splay(pindex, root);
		if (pindex < root->pindex) {
			m->left = root->left;
			m->right = root;
			root->left = NULL;
			TAILQ_INSERT_BEFORE(root, m, listq);
		} else if (pindex == root->pindex)
			panic("vm_page_insert: offset already allocated");
		else {
			m->right = root->right;
			m->left = root;
			root->right = NULL;
			TAILQ_INSERT_AFTER(&object->memq, root, m, listq);
		}
	}
	object->root = m;

	/*
	 * Show that the object has one more resident page.
	 */
	object->resident_page_count++;

	/*
	 * Hold the vnode until the last page is released.
	 */
	if (object->resident_page_count == 1 && object->type == OBJT_VNODE)
		vhold(object->handle);

	/*
	 * Since we are inserting a new and possibly dirty page,
	 * update the object's OBJ_MIGHTBEDIRTY flag.
	 */
	if (pmap_page_is_write_mapped(m))
		vm_object_set_writeable_dirty(object);
}

/*
 *	vm_page_remove:
 *
 *	Removes the given mem entry from the object/offset-page
 *	table and the object page list, but does not invalidate/terminate
 *	the backing store.
 *
 *	The underlying pmap entry (if any) is NOT removed here.
 *
 *	The object must be locked.  The page must be locked if it is managed.
 */
void
vm_page_remove(vm_page_t m)
{
	vm_object_t object;
	vm_page_t next, prev, root;

	if ((m->oflags & VPO_UNMANAGED) == 0)
		vm_page_lock_assert(m, MA_OWNED);
	if ((object = m->object) == NULL)
		return;
	VM_OBJECT_LOCK_ASSERT(object, MA_OWNED);
	if (m->oflags & VPO_BUSY) {
		m->oflags &= ~VPO_BUSY;
		vm_page_flash(m);
	}

	/*
	 * Now remove from the object's list of backed pages.
	 */
	if ((next = TAILQ_NEXT(m, listq)) != NULL && next->left == m) {
		/*
		 * Since the page's successor in the list is also its parent
		 * in the tree, its right subtree must be empty.
		 */
		next->left = m->left;
		KASSERT(m->right == NULL,
		    ("vm_page_remove: page %p has right child", m));
	} else if ((prev = TAILQ_PREV(m, pglist, listq)) != NULL &&
	    prev->right == m) {
		/*
		 * Since the page's predecessor in the list is also its parent
		 * in the tree, its left subtree must be empty.
		 */
		KASSERT(m->left == NULL,
		    ("vm_page_remove: page %p has left child", m));
		prev->right = m->right;
	} else {
		if (m != object->root)
			vm_page_splay(m->pindex, object->root);
		if (m->left == NULL)
			root = m->right;
		else if (m->right == NULL)
			root = m->left;
		else {
			/*
			 * Move the page's successor to the root, because
			 * pages are usually removed in ascending order.
			 */
			if (m->right != next)
				vm_page_splay(m->pindex, m->right);
			next->left = m->left;
			root = next;
		}
		object->root = root;
	}
	TAILQ_REMOVE(&object->memq, m, listq);

	/*
	 * And show that the object has one fewer resident page.
	 */
	object->resident_page_count--;

	/*
	 * The vnode may now be recycled.
	 */
	if (object->resident_page_count == 0 && object->type == OBJT_VNODE)
		vdrop(object->handle);

	m->object = NULL;
}

/*
 *	vm_page_lookup:
 *
 *	Returns the page associated with the object/offset
 *	pair specified; if none is found, NULL is returned.
 *
 *	The object must be locked.
 */
vm_page_t
vm_page_lookup(vm_object_t object, vm_pindex_t pindex)
{
	vm_page_t m;

	VM_OBJECT_LOCK_ASSERT(object, MA_OWNED);
	if ((m = object->root) != NULL && m->pindex != pindex) {
		m = vm_page_splay(pindex, m);
		if ((object->root = m)->pindex != pindex)
			m = NULL;
	}
	return (m);
}

/*
 *	vm_page_find_least:
 *
 *	Returns the page associated with the object with least pindex
 *	greater than or equal to the parameter pindex, or NULL.
 *
 *	The object must be locked.
 */
vm_page_t
vm_page_find_least(vm_object_t object, vm_pindex_t pindex)
{
	vm_page_t m;

	VM_OBJECT_LOCK_ASSERT(object, MA_OWNED);
	if ((m = TAILQ_FIRST(&object->memq)) != NULL) {
		if (m->pindex < pindex) {
			m = vm_page_splay(pindex, object->root);
			if ((object->root = m)->pindex < pindex)
				m = TAILQ_NEXT(m, listq);
		}
	}
	return (m);
}

/*
 * Returns the given page's successor (by pindex) within the object if it is
 * resident; if none is found, NULL is returned.
 *
 * The object must be locked.
 */
vm_page_t
vm_page_next(vm_page_t m)
{
	vm_page_t next;

	VM_OBJECT_LOCK_ASSERT(m->object, MA_OWNED);
	if ((next = TAILQ_NEXT(m, listq)) != NULL &&
	    next->pindex != m->pindex + 1)
		next = NULL;
	return (next);
}

/*
 * Returns the given page's predecessor (by pindex) within the object if it is
 * resident; if none is found, NULL is returned.
 *
 * The object must be locked.
 */
vm_page_t
vm_page_prev(vm_page_t m)
{
	vm_page_t prev;

	VM_OBJECT_LOCK_ASSERT(m->object, MA_OWNED);
	if ((prev = TAILQ_PREV(m, pglist, listq)) != NULL &&
	    prev->pindex != m->pindex - 1)
		prev = NULL;
	return (prev);
}

/*
 *	vm_page_rename:
 *
 *	Move the given memory entry from its
 *	current object to the specified target object/offset.
 *
 *	Note: swap associated with the page must be invalidated by the move.  We
 *	      have to do this for several reasons:  (1) we aren't freeing the
 *	      page, (2) we are dirtying the page, (3) the VM system is probably
 *	      moving the page from object A to B, and will then later move
 *	      the backing store from A to B and we can't have a conflict.
 *
 *	Note: we *always* dirty the page.  It is necessary both for the
 *	      fact that we moved it, and because we may be invalidating
 *	      swap.  If the page is in the cache, we have to deactivate it
 *	      or vm_page_dirty() will panic.  Dirty pages are not allowed
 *	      in the cache.
 *
 *	The objects must be locked.  The page must be locked if it is managed.
 */
void
vm_page_rename(vm_page_t m, vm_object_t new_object, vm_pindex_t new_pindex)
{

	vm_page_remove(m);
	vm_page_insert(m, new_object, new_pindex);
	vm_page_dirty(m);
}

/*
 *	Convert all of the given object's cached pages that have a
 *	pindex within the given range into free pages.  If the value
 *	zero is given for "end", then the range's upper bound is
 *	infinity.  If the given object is backed by a vnode and it
 *	transitions from having one or more cached pages to none, the
 *	vnode's hold count is reduced.
 */
void
vm_page_cache_free(vm_object_t object, vm_pindex_t start, vm_pindex_t end)
{
	vm_page_t m, m_next;
	boolean_t empty;

	mtx_lock(&vm_page_queue_free_mtx);
	if (__predict_false(object->cache == NULL)) {
		mtx_unlock(&vm_page_queue_free_mtx);
		return;
	}
	m = object->cache = vm_page_splay(start, object->cache);
	if (m->pindex < start) {
		if (m->right == NULL)
			m = NULL;
		else {
			m_next = vm_page_splay(start, m->right);
			m_next->left = m;
			m->right = NULL;
			m = object->cache = m_next;
		}
	}

	/*
	 * At this point, "m" is either (1) a reference to the page
	 * with the least pindex that is greater than or equal to
	 * "start" or (2) NULL.
	 */
	for (; m != NULL && (m->pindex < end || end == 0); m = m_next) {
		/*
		 * Find "m"'s successor and remove "m" from the
		 * object's cache.
		 */
		if (m->right == NULL) {
			object->cache = m->left;
			m_next = NULL;
		} else {
			m_next = vm_page_splay(start, m->right);
			m_next->left = m->left;
			object->cache = m_next;
		}
		/* Convert "m" to a free page. */
		m->object = NULL;
		m->valid = 0;
		/* Clear PG_CACHED and set PG_FREE. */
		m->flags ^= PG_CACHED | PG_FREE;
		KASSERT((m->flags & (PG_CACHED | PG_FREE)) == PG_FREE,
		    ("vm_page_cache_free: page %p has inconsistent flags", m));
		cnt.v_cache_count--;
		cnt.v_free_count++;
	}
	empty = object->cache == NULL;
	mtx_unlock(&vm_page_queue_free_mtx);
	if (object->type == OBJT_VNODE && empty)
		vdrop(object->handle);
}

/*
 *	Returns the cached page that is associated with the given
 *	object and offset.  If, however, none exists, returns NULL.
 *
 *	The free page queue must be locked.
 */
static inline vm_page_t
vm_page_cache_lookup(vm_object_t object, vm_pindex_t pindex)
{
	vm_page_t m;

	mtx_assert(&vm_page_queue_free_mtx, MA_OWNED);
	if ((m = object->cache) != NULL && m->pindex != pindex) {
		m = vm_page_splay(pindex, m);
		if ((object->cache = m)->pindex != pindex)
			m = NULL;
	}
	return (m);
}

/*
 *	Remove the given cached page from its containing object's
 *	collection of cached pages.
 *
 *	The free page queue must be locked.
 */
static void
vm_page_cache_remove(vm_page_t m)
{
	vm_object_t object;
	vm_page_t root;

	mtx_assert(&vm_page_queue_free_mtx, MA_OWNED);
	KASSERT((m->flags & PG_CACHED) != 0,
	    ("vm_page_cache_remove: page %p is not cached", m));
	object = m->object;
	if (m != object->cache) {
		root = vm_page_splay(m->pindex, object->cache);
		KASSERT(root == m,
		    ("vm_page_cache_remove: page %p is not cached in object %p",
		    m, object));
	}
	if (m->left == NULL)
		root = m->right;
	else if (m->right == NULL)
		root = m->left;
	else {
		root = vm_page_splay(m->pindex, m->left);
		root->right = m->right;
	}
	object->cache = root;
	m->object = NULL;
	cnt.v_cache_count--;
}

/*
 *	Transfer all of the cached pages with offset greater than or
 *	equal to 'offidxstart' from the original object's cache to the
 *	new object's cache.  However, any cached pages with offset
 *	greater than or equal to the new object's size are kept in the
 *	original object.  Initially, the new object's cache must be
 *	empty.  Offset 'offidxstart' in the original object must
 *	correspond to offset zero in the new object.
 *
 *	The new object must be locked.
 */
void
vm_page_cache_transfer(vm_object_t orig_object, vm_pindex_t offidxstart,
    vm_object_t new_object)
{
	vm_page_t m, m_next;

	/*
	 * Insertion into an object's collection of cached pages
	 * requires the object to be locked.  In contrast, removal does
	 * not.
	 */
	VM_OBJECT_LOCK_ASSERT(new_object, MA_OWNED);
	KASSERT(new_object->cache == NULL,
	    ("vm_page_cache_transfer: object %p has cached pages",
	    new_object));
	mtx_lock(&vm_page_queue_free_mtx);
	if ((m = orig_object->cache) != NULL) {
		/*
		 * Transfer all of the pages with offset greater than or
		 * equal to 'offidxstart' from the original object's
		 * cache to the new object's cache.
		 */
		m = vm_page_splay(offidxstart, m);
		if (m->pindex < offidxstart) {
			orig_object->cache = m;
			new_object->cache = m->right;
			m->right = NULL;
		} else {
			orig_object->cache = m->left;
			new_object->cache = m;
			m->left = NULL;
		}
		while ((m = new_object->cache) != NULL) {
			if ((m->pindex - offidxstart) >= new_object->size) {
				/*
				 * Return all of the cached pages with
				 * offset greater than or equal to the
				 * new object's size to the original
				 * object's cache.
				 */
				new_object->cache = m->left;
				m->left = orig_object->cache;
				orig_object->cache = m;
				break;
			}
			m_next = vm_page_splay(m->pindex, m->right);
			/* Update the page's object and offset. */
			m->object = new_object;
			m->pindex -= offidxstart;
			if (m_next == NULL)
				break;
			m->right = NULL;
			m_next->left = m;
			new_object->cache = m_next;
		}
		KASSERT(new_object->cache == NULL ||
		    new_object->type == OBJT_SWAP,
		    ("vm_page_cache_transfer: object %p's type is incompatible"
		    " with cached pages", new_object));
	}
	mtx_unlock(&vm_page_queue_free_mtx);
}

/*
 *	Returns TRUE if a cached page is associated with the given object and
 *	offset, and FALSE otherwise.
 *
 *	The object must be locked.
 */
boolean_t
vm_page_is_cached(vm_object_t object, vm_pindex_t pindex)
{
	vm_page_t m;

	/*
	 * Insertion into an object's collection of cached pages requires the
	 * object to be locked.  Therefore, if the object is locked and the
	 * object's collection is empty, there is no need to acquire the free
	 * page queues lock in order to prove that the specified page doesn't
	 * exist.
	 */
	VM_OBJECT_LOCK_ASSERT(object, MA_OWNED);
	if (__predict_true(object->cache == NULL))
		return (FALSE);
	mtx_lock(&vm_page_queue_free_mtx);
	m = vm_page_cache_lookup(object, pindex);
	mtx_unlock(&vm_page_queue_free_mtx);
	return (m != NULL);
}

/*
 *	vm_page_alloc:
 *
 *	Allocate and return a page that is associated with the specified
 *	object and offset pair.  By default, this page has the flag VPO_BUSY
 *	set.
 *
 *	The caller must always specify an allocation class.
 *
 *	allocation classes:
 *	VM_ALLOC_NORMAL		normal process request
 *	VM_ALLOC_SYSTEM		system *really* needs a page
 *	VM_ALLOC_INTERRUPT	interrupt time request
 *
 *	optional allocation flags:
 *	VM_ALLOC_COUNT(number)	the number of additional pages that the caller
 *				intends to allocate
 *	VM_ALLOC_IFCACHED	return page only if it is cached
 *	VM_ALLOC_IFNOTCACHED	return NULL, do not reactivate if the page
 *				is cached
 *	VM_ALLOC_NOBUSY		do not set the flag VPO_BUSY on the page
 *	VM_ALLOC_NODUMP		do not include the page in a kernel core dump
 *	VM_ALLOC_NOOBJ		page is not associated with an object and
 *				should not have the flag VPO_BUSY set
 *	VM_ALLOC_WIRED		wire the allocated page
 *	VM_ALLOC_ZERO		prefer a zeroed page
 *
 *	This routine may not sleep.
 */
vm_page_t
vm_page_alloc(vm_object_t object, vm_pindex_t pindex, int req)
{
	struct vnode *vp = NULL;
	vm_object_t m_object;
	vm_page_t m;
	int flags, req_class;

	KASSERT((object != NULL) == ((req & VM_ALLOC_NOOBJ) == 0),
	    ("vm_page_alloc: inconsistent object/req"));
	if (object != NULL)
		VM_OBJECT_LOCK_ASSERT(object, MA_OWNED);

	req_class = req & VM_ALLOC_CLASS_MASK;

	/*
	 * The page daemon is allowed to dig deeper into the free page list.
	 */
	if (curproc == pageproc && req_class != VM_ALLOC_INTERRUPT)
		req_class = VM_ALLOC_SYSTEM;

	mtx_lock(&vm_page_queue_free_mtx);
	if (cnt.v_free_count + cnt.v_cache_count > cnt.v_free_reserved ||
	    (req_class == VM_ALLOC_SYSTEM &&
	    cnt.v_free_count + cnt.v_cache_count > cnt.v_interrupt_free_min) ||
	    (req_class == VM_ALLOC_INTERRUPT &&
	    cnt.v_free_count + cnt.v_cache_count > 0)) {
		/*
		 * Allocate from the free queue if the number of free pages
		 * exceeds the minimum for the request class.
		 */
		if (object != NULL &&
		    (m = vm_page_cache_lookup(object, pindex)) != NULL) {
			if ((req & VM_ALLOC_IFNOTCACHED) != 0) {
				mtx_unlock(&vm_page_queue_free_mtx);
				return (NULL);
			}
			if (vm_phys_unfree_page(m))
				vm_phys_set_pool(VM_FREEPOOL_DEFAULT, m, 0);
#if VM_NRESERVLEVEL > 0
			else if (!vm_reserv_reactivate_page(m))
#else
			else
#endif
				panic("vm_page_alloc: cache page %p is missing"
				    " from the free queue", m);
		} else if ((req & VM_ALLOC_IFCACHED) != 0) {
			mtx_unlock(&vm_page_queue_free_mtx);
			return (NULL);
#if VM_NRESERVLEVEL > 0
		} else if (object == NULL || object->type == OBJT_DEVICE ||
		    object->type == OBJT_SG ||
		    (object->flags & OBJ_COLORED) == 0 ||
		    (m = vm_reserv_alloc_page(object, pindex)) == NULL) {
#else
		} else {
#endif
			m = vm_phys_alloc_pages(object != NULL ?
			    VM_FREEPOOL_DEFAULT : VM_FREEPOOL_DIRECT, 0);
#if VM_NRESERVLEVEL > 0
			if (m == NULL && vm_reserv_reclaim_inactive()) {
				m = vm_phys_alloc_pages(object != NULL ?
				    VM_FREEPOOL_DEFAULT : VM_FREEPOOL_DIRECT,
				    0);
			}
#endif
		}
	} else {
		/*
		 * Not allocatable, give up.
		 */
		mtx_unlock(&vm_page_queue_free_mtx);
		atomic_add_int(&vm_pageout_deficit,
		    max((u_int)req >> VM_ALLOC_COUNT_SHIFT, 1));
		pagedaemon_wakeup();
		return (NULL);
	}

	/*
	 *  At this point we had better have found a good page.
	 */
	KASSERT(m != NULL, ("vm_page_alloc: missing page"));
	KASSERT(m->queue == PQ_NONE,
	    ("vm_page_alloc: page %p has unexpected queue %d", m, m->queue));
	KASSERT(m->wire_count == 0, ("vm_page_alloc: page %p is wired", m));
	KASSERT(m->hold_count == 0, ("vm_page_alloc: page %p is held", m));
	KASSERT(m->busy == 0, ("vm_page_alloc: page %p is busy", m));
	KASSERT(m->dirty == 0, ("vm_page_alloc: page %p is dirty", m));
	KASSERT(pmap_page_get_memattr(m) == VM_MEMATTR_DEFAULT,
	    ("vm_page_alloc: page %p has unexpected memattr %d", m,
	    pmap_page_get_memattr(m)));
	if ((m->flags & PG_CACHED) != 0) {
		KASSERT((m->flags & PG_ZERO) == 0,
		    ("vm_page_alloc: cached page %p is PG_ZERO", m));
		KASSERT(m->valid != 0,
		    ("vm_page_alloc: cached page %p is invalid", m));
		if (m->object == object && m->pindex == pindex)
			cnt.v_reactivated++;
		else
			m->valid = 0;
		m_object = m->object;
		vm_page_cache_remove(m);
		if (m_object->type == OBJT_VNODE && m_object->cache == NULL)
			vp = m_object->handle;
	} else {
		KASSERT(VM_PAGE_IS_FREE(m),
		    ("vm_page_alloc: page %p is not free", m));
		KASSERT(m->valid == 0,
		    ("vm_page_alloc: free page %p is valid", m));
		cnt.v_free_count--;
	}

	/*
	 * Only the PG_ZERO flag is inherited.  The PG_CACHED or PG_FREE flag
	 * must be cleared before the free page queues lock is released.
	 */
	flags = 0;
	if (req & VM_ALLOC_NODUMP)
		flags |= PG_NODUMP;
	if (m->flags & PG_ZERO) {
		vm_page_zero_count--;
		if (req & VM_ALLOC_ZERO)
			flags = PG_ZERO;
	}
	m->flags = flags;
	mtx_unlock(&vm_page_queue_free_mtx);
	m->aflags = 0;
	if (object == NULL || object->type == OBJT_PHYS)
		m->oflags = VPO_UNMANAGED;
	else
		m->oflags = 0;
	if ((req & (VM_ALLOC_NOBUSY | VM_ALLOC_NOOBJ)) == 0)
		m->oflags |= VPO_BUSY;
	if (req & VM_ALLOC_WIRED) {
		/*
		 * The page lock is not required for wiring a page until that
		 * page is inserted into the object.
		 */
		atomic_add_int(&cnt.v_wire_count, 1);
		m->wire_count = 1;
	}
	m->act_count = 0;

	if (object != NULL) {
		/* Ignore device objects; the pager sets "memattr" for them. */
		if (object->memattr != VM_MEMATTR_DEFAULT &&
		    object->type != OBJT_DEVICE && object->type != OBJT_SG)
			pmap_page_set_memattr(m, object->memattr);
		vm_page_insert(m, object, pindex);
	} else
		m->pindex = pindex;

	/*
	 * The following call to vdrop() must come after the above call
	 * to vm_page_insert() in case both affect the same object and
	 * vnode.  Otherwise, the affected vnode's hold count could
	 * temporarily become zero.
	 */
	if (vp != NULL)
		vdrop(vp);

	/*
	 * Don't wakeup too often - wakeup the pageout daemon when
	 * we would be nearly out of memory.
	 */
	if (vm_paging_needed())
		pagedaemon_wakeup();

	return (m);
}

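/*
 * Illustrative sketch (not part of the original file): the common
 * allocate-or-wait pattern, retrying via VM_WAIT when free pages are
 * scarce.  "object" and "pindex" are hypothetical, and note that
 * VM_ALLOC_ZERO only expresses a preference, so PG_ZERO must be checked.
 */
#if 0
retry:
	m = vm_page_alloc(object, pindex, VM_ALLOC_NORMAL | VM_ALLOC_ZERO);
	if (m == NULL) {
		VM_OBJECT_UNLOCK(object);
		VM_WAIT;
		VM_OBJECT_LOCK(object);
		goto retry;
	}
	if ((m->flags & PG_ZERO) == 0)
		pmap_zero_page(m);
#endif
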
/*
 *	vm_page_alloc_contig:
 *
 *	Allocate a contiguous set of physical pages of the given size "npages"
 *	from the free lists.  All of the physical pages must be at or above
 *	the given physical address "low" and below the given physical address
 *	"high".  The given value "alignment" determines the alignment of the
 *	first physical page in the set.  If the given value "boundary" is
 *	non-zero, then the set of physical pages cannot cross any physical
 *	address boundary that is a multiple of that value.  Both "alignment"
 *	and "boundary" must be a power of two.
 *
 *	If the specified memory attribute, "memattr", is VM_MEMATTR_DEFAULT,
 *	then the memory attribute setting for the physical pages is configured
 *	to the object's memory attribute setting.  Otherwise, the memory
 *	attribute setting for the physical pages is configured to "memattr",
 *	overriding the object's memory attribute setting.  However, if the
 *	object's memory attribute setting is not VM_MEMATTR_DEFAULT, then the
 *	memory attribute setting for the physical pages cannot be configured
 *	to VM_MEMATTR_DEFAULT.
 *
 *	The caller must always specify an allocation class.
 *
 *	allocation classes:
 *	VM_ALLOC_NORMAL		normal process request
 *	VM_ALLOC_SYSTEM		system *really* needs a page
 *	VM_ALLOC_INTERRUPT	interrupt time request
 *
 *	optional allocation flags:
 *	VM_ALLOC_NOBUSY		do not set the flag VPO_BUSY on the page
 *	VM_ALLOC_NOOBJ		page is not associated with an object and
 *				should not have the flag VPO_BUSY set
 *	VM_ALLOC_WIRED		wire the allocated page
 *	VM_ALLOC_ZERO		prefer a zeroed page
 *
 *	This routine may not sleep.
 */
vm_page_t
vm_page_alloc_contig(vm_object_t object, vm_pindex_t pindex, int req,
    u_long npages, vm_paddr_t low, vm_paddr_t high, u_long alignment,
    vm_paddr_t boundary, vm_memattr_t memattr)
{
	struct vnode *drop;
	vm_page_t deferred_vdrop_list, m, m_ret;
	u_int flags, oflags;
	int req_class;

	KASSERT((object != NULL) == ((req & VM_ALLOC_NOOBJ) == 0),
	    ("vm_page_alloc_contig: inconsistent object/req"));
	if (object != NULL) {
		VM_OBJECT_LOCK_ASSERT(object, MA_OWNED);
		KASSERT(object->type == OBJT_PHYS,
		    ("vm_page_alloc_contig: object %p isn't OBJT_PHYS",
		    object));
	}
	KASSERT(npages > 0, ("vm_page_alloc_contig: npages is zero"));
	req_class = req & VM_ALLOC_CLASS_MASK;

	/*
	 * The page daemon is allowed to dig deeper into the free page list.
	 */
	if (curproc == pageproc && req_class != VM_ALLOC_INTERRUPT)
		req_class = VM_ALLOC_SYSTEM;

	deferred_vdrop_list = NULL;
	mtx_lock(&vm_page_queue_free_mtx);
	if (cnt.v_free_count + cnt.v_cache_count >= npages +
	    cnt.v_free_reserved || (req_class == VM_ALLOC_SYSTEM &&
	    cnt.v_free_count + cnt.v_cache_count >= npages +
	    cnt.v_interrupt_free_min) || (req_class == VM_ALLOC_INTERRUPT &&
	    cnt.v_free_count + cnt.v_cache_count >= npages)) {
#if VM_NRESERVLEVEL > 0
retry:
		if (object == NULL || (object->flags & OBJ_COLORED) == 0 ||
		    (m_ret = vm_reserv_alloc_contig(object, pindex, npages,
		    low, high, alignment, boundary)) == NULL)
#endif
			m_ret = vm_phys_alloc_contig(npages, low, high,
			    alignment, boundary);
	} else {
		mtx_unlock(&vm_page_queue_free_mtx);
		atomic_add_int(&vm_pageout_deficit, npages);
		pagedaemon_wakeup();
		return (NULL);
	}
	if (m_ret != NULL)
		for (m = m_ret; m < &m_ret[npages]; m++) {
			drop = vm_page_alloc_init(m);
			if (drop != NULL) {
				/*
				 * Enqueue the vnode for deferred vdrop().
				 *
				 * Once the pages are removed from the free
				 * page list, "pageq" can be safely abused to
				 * construct a short-lived list of vnodes.
				 */
				m->pageq.tqe_prev = (void *)drop;
				m->pageq.tqe_next = deferred_vdrop_list;
				deferred_vdrop_list = m;
			}
		}
	else {
#if VM_NRESERVLEVEL > 0
		if (vm_reserv_reclaim_contig(npages, low, high, alignment,
		    boundary))
			goto retry;
#endif
	}
	mtx_unlock(&vm_page_queue_free_mtx);
	if (m_ret == NULL)
		return (NULL);

	/*
	 * Initialize the pages.  Only the PG_ZERO flag is inherited.
	 */
	flags = 0;
	if ((req & VM_ALLOC_ZERO) != 0)
		flags = PG_ZERO;
	if ((req & VM_ALLOC_NODUMP) != 0)
		flags |= PG_NODUMP;
	if ((req & VM_ALLOC_WIRED) != 0)
		atomic_add_int(&cnt.v_wire_count, npages);
	oflags = VPO_UNMANAGED;
	if (object != NULL) {
		if ((req & VM_ALLOC_NOBUSY) == 0)
			oflags |= VPO_BUSY;
		if (object->memattr != VM_MEMATTR_DEFAULT &&
		    memattr == VM_MEMATTR_DEFAULT)
			memattr = object->memattr;
	}
	for (m = m_ret; m < &m_ret[npages]; m++) {
		m->aflags = 0;
		m->flags = (m->flags | PG_NODUMP) & flags;
		if ((req & VM_ALLOC_WIRED) != 0)
			m->wire_count = 1;
		/* Unmanaged pages don't use "act_count". */
		m->oflags = oflags;
		if (memattr != VM_MEMATTR_DEFAULT)
			pmap_page_set_memattr(m, memattr);
		if (object != NULL)
			vm_page_insert(m, object, pindex);
		else
			m->pindex = pindex;
		pindex++;
	}
	while (deferred_vdrop_list != NULL) {
		vdrop((struct vnode *)deferred_vdrop_list->pageq.tqe_prev);
		deferred_vdrop_list = deferred_vdrop_list->pageq.tqe_next;
	}
	if (vm_paging_needed())
		pagedaemon_wakeup();
	return (m_ret);
}

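/*
 * Illustrative sketch (not part of the original file): allocating a
 * wired, zero-filled, physically contiguous buffer below 4GB in the
 * style of contigmalloc(9).  "npages" is hypothetical.
 */
#if 0
	m = vm_page_alloc_contig(NULL, 0, VM_ALLOC_SYSTEM | VM_ALLOC_NOOBJ |
	    VM_ALLOC_WIRED | VM_ALLOC_ZERO, npages, 0, 0xffffffff,
	    PAGE_SIZE, 0, VM_MEMATTR_DEFAULT);
	if (m == NULL) {
		/* This routine does not sleep; the caller must retry or fail. */
	}
#endif
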
/*
 * Initialize a page that has been freshly dequeued from a freelist.
 * The caller has to drop the vnode returned, if it is not NULL.
 *
 * This function may only be used to initialize unmanaged pages.
 *
 * To be called with vm_page_queue_free_mtx held.
 */
static struct vnode *
vm_page_alloc_init(vm_page_t m)
{
	struct vnode *drop;
	vm_object_t m_object;

	KASSERT(m->queue == PQ_NONE,
	    ("vm_page_alloc_init: page %p has unexpected queue %d",
	    m, m->queue));
	KASSERT(m->wire_count == 0,
	    ("vm_page_alloc_init: page %p is wired", m));
	KASSERT(m->hold_count == 0,
	    ("vm_page_alloc_init: page %p is held", m));
	KASSERT(m->busy == 0,
	    ("vm_page_alloc_init: page %p is busy", m));
	KASSERT(m->dirty == 0,
	    ("vm_page_alloc_init: page %p is dirty", m));
	KASSERT(pmap_page_get_memattr(m) == VM_MEMATTR_DEFAULT,
	    ("vm_page_alloc_init: page %p has unexpected memattr %d",
	    m, pmap_page_get_memattr(m)));
	mtx_assert(&vm_page_queue_free_mtx, MA_OWNED);
	drop = NULL;
	if ((m->flags & PG_CACHED) != 0) {
		KASSERT((m->flags & PG_ZERO) == 0,
		    ("vm_page_alloc_init: cached page %p is PG_ZERO", m));
		m->valid = 0;
		m_object = m->object;
		vm_page_cache_remove(m);
		if (m_object->type == OBJT_VNODE && m_object->cache == NULL)
			drop = m_object->handle;
	} else {
		KASSERT(VM_PAGE_IS_FREE(m),
		    ("vm_page_alloc_init: page %p is not free", m));
		KASSERT(m->valid == 0,
		    ("vm_page_alloc_init: free page %p is valid", m));
		cnt.v_free_count--;
		if ((m->flags & PG_ZERO) != 0)
			vm_page_zero_count--;
	}
	/* Don't clear the PG_ZERO flag; we'll need it later. */
	m->flags &= PG_ZERO;
	return (drop);
}

/*
 * 	vm_page_alloc_freelist:
 *
 *	Allocate a physical page from the specified free page list.
 *
 *	The caller must always specify an allocation class.
 *
 *	allocation classes:
 *	VM_ALLOC_NORMAL		normal process request
 *	VM_ALLOC_SYSTEM		system *really* needs a page
 *	VM_ALLOC_INTERRUPT	interrupt time request
 *
 *	optional allocation flags:
 *	VM_ALLOC_COUNT(number)	the number of additional pages that the caller
 *				intends to allocate
 *	VM_ALLOC_WIRED		wire the allocated page
 *	VM_ALLOC_ZERO		prefer a zeroed page
 *
 *	This routine may not sleep.
 */
vm_page_t
vm_page_alloc_freelist(int flind, int req)
{
	struct vnode *drop;
	vm_page_t m;
	u_int flags;
	int req_class;

	req_class = req & VM_ALLOC_CLASS_MASK;

	/*
	 * The page daemon is allowed to dig deeper into the free page list.
	 */
	if (curproc == pageproc && req_class != VM_ALLOC_INTERRUPT)
		req_class = VM_ALLOC_SYSTEM;

	/*
	 * Do not allocate reserved pages unless the req has asked for it.
	 */
	mtx_lock(&vm_page_queue_free_mtx);
	if (cnt.v_free_count + cnt.v_cache_count > cnt.v_free_reserved ||
	    (req_class == VM_ALLOC_SYSTEM &&
	    cnt.v_free_count + cnt.v_cache_count > cnt.v_interrupt_free_min) ||
	    (req_class == VM_ALLOC_INTERRUPT &&
	    cnt.v_free_count + cnt.v_cache_count > 0))
		m = vm_phys_alloc_freelist_pages(flind, VM_FREEPOOL_DIRECT, 0);
	else {
		mtx_unlock(&vm_page_queue_free_mtx);
		atomic_add_int(&vm_pageout_deficit,
		    max((u_int)req >> VM_ALLOC_COUNT_SHIFT, 1));
		pagedaemon_wakeup();
		return (NULL);
	}
	if (m == NULL) {
		mtx_unlock(&vm_page_queue_free_mtx);
		return (NULL);
	}
	drop = vm_page_alloc_init(m);
	mtx_unlock(&vm_page_queue_free_mtx);

	/*
	 * Initialize the page.  Only the PG_ZERO flag is inherited.
	 */
	m->aflags = 0;
	flags = 0;
	if ((req & VM_ALLOC_ZERO) != 0)
		flags = PG_ZERO;
	m->flags &= flags;
	if ((req & VM_ALLOC_WIRED) != 0) {
		/*
		 * The page lock is not required for wiring a page that does
		 * not belong to an object.
		 */
		atomic_add_int(&cnt.v_wire_count, 1);
		m->wire_count = 1;
	}
	/* Unmanaged pages don't use "act_count". */
	m->oflags = VPO_UNMANAGED;
	if (drop != NULL)
		vdrop(drop);
	if (vm_paging_needed())
		pagedaemon_wakeup();
	return (m);
}

/*
 *	vm_wait:	(also see VM_WAIT macro)
 *
 *	Sleep until free pages are available for allocation.
 *	- Called in various places before memory allocations.
 */
void
vm_wait(void)
{

	mtx_lock(&vm_page_queue_free_mtx);
	if (curproc == pageproc) {
		vm_pageout_pages_needed = 1;
		msleep(&vm_pageout_pages_needed, &vm_page_queue_free_mtx,
		    PDROP | PSWP, "VMWait", 0);
	} else {
		if (!vm_pages_needed) {
			vm_pages_needed = 1;
			wakeup(&vm_pages_needed);
		}
		msleep(&cnt.v_free_count, &vm_page_queue_free_mtx, PDROP | PVM,
		    "vmwait", 0);
	}
}

/*
 *	vm_waitpfault:	(also see VM_WAITPFAULT macro)
 *
 *	Sleep until free pages are available for allocation.
 *	- Called only in vm_fault so that processes page faulting
 *	  can be easily tracked.
 *	- Sleeps at a lower priority than vm_wait() so that vm_wait()ing
 *	  processes will be able to grab memory first.  Do not change
 *	  this balance without careful testing first.
 */
void
vm_waitpfault(void)
{

	mtx_lock(&vm_page_queue_free_mtx);
	if (!vm_pages_needed) {
		vm_pages_needed = 1;
		wakeup(&vm_pages_needed);
	}
	msleep(&cnt.v_free_count, &vm_page_queue_free_mtx, PDROP | PUSER,
	    "pfault", 0);
}

/*
 *	vm_page_requeue:
 *
 *	Move the given page to the tail of its present page queue.
 *
 *	The page queues must be locked.
 */
void
vm_page_requeue(vm_page_t m)
{
	struct vpgqueues *vpq;
	int queue;

	mtx_assert(&vm_page_queue_mtx, MA_OWNED);
	queue = m->queue;
	KASSERT(queue != PQ_NONE,
	    ("vm_page_requeue: page %p is not queued", m));
	vpq = &vm_page_queues[queue];
	TAILQ_REMOVE(&vpq->pl, m, pageq);
	TAILQ_INSERT_TAIL(&vpq->pl, m, pageq);
}

/*
 *	vm_page_queue_remove:
 *
 *	Remove the given page from the specified queue.
 *
 *	The page and page queues must be locked.
 */
static __inline void
vm_page_queue_remove(int queue, vm_page_t m)
{
	struct vpgqueues *pq;

	mtx_assert(&vm_page_queue_mtx, MA_OWNED);
	vm_page_lock_assert(m, MA_OWNED);
	pq = &vm_page_queues[queue];
	TAILQ_REMOVE(&pq->pl, m, pageq);
	(*pq->cnt)--;
}

/*
 *	vm_pageq_remove:
 *
 *	Remove a page from its queue.
 *
 *	The given page must be locked.
 */
void
vm_pageq_remove(vm_page_t m)
{
	int queue;

	vm_page_lock_assert(m, MA_OWNED);
	if ((queue = m->queue) != PQ_NONE) {
		vm_page_lock_queues();
		m->queue = PQ_NONE;
		vm_page_queue_remove(queue, m);
		vm_page_unlock_queues();
	}
}

/*
 *	vm_page_enqueue:
 *
 *	Add the given page to the specified queue.
 *
 *	The page queues must be locked.
 */
static void
vm_page_enqueue(int queue, vm_page_t m)
{
	struct vpgqueues *vpq;

	vpq = &vm_page_queues[queue];
	m->queue = queue;
	TAILQ_INSERT_TAIL(&vpq->pl, m, pageq);
	++*vpq->cnt;
}

/*
 *	vm_page_activate:
 *
 *	Put the specified page on the active list (if appropriate).
 *	Ensure that act_count is at least ACT_INIT but do not otherwise
 *	mess with it.
 *
 *	The page must be locked.
 */
void
vm_page_activate(vm_page_t m)
{
	int queue;

	vm_page_lock_assert(m, MA_OWNED);
	VM_OBJECT_LOCK_ASSERT(m->object, MA_OWNED);
	if ((queue = m->queue) != PQ_ACTIVE) {
		if (m->wire_count == 0 && (m->oflags & VPO_UNMANAGED) == 0) {
			if (m->act_count < ACT_INIT)
				m->act_count = ACT_INIT;
			vm_page_lock_queues();
			if (queue != PQ_NONE)
				vm_page_queue_remove(queue, m);
			vm_page_enqueue(PQ_ACTIVE, m);
			vm_page_unlock_queues();
		} else
			KASSERT(queue == PQ_NONE,
			    ("vm_page_activate: wired page %p is queued", m));
	} else {
		if (m->act_count < ACT_INIT)
			m->act_count = ACT_INIT;
	}
}

/*
 *	vm_page_free_wakeup:
 *
 *	Helper routine for vm_page_free_toq() and vm_page_cache().  This
 *	routine is called when a page has been added to the cache or free
 *	queues.
 *
 *	The page queues must be locked.
 */
static inline void
vm_page_free_wakeup(void)
{

	mtx_assert(&vm_page_queue_free_mtx, MA_OWNED);
1998	/*
1999	 * If the pageout daemon needs pages, then tell it that some
2000	 * are free.
2001	 */
2002	if (vm_pageout_pages_needed &&
2003	    cnt.v_cache_count + cnt.v_free_count >= cnt.v_pageout_free_min) {
2004		wakeup(&vm_pageout_pages_needed);
2005		vm_pageout_pages_needed = 0;
2006	}
2007	/*
2008	 * Wake up processes that are waiting on memory if we hit a
2009	 * high water mark, and wake up the scheduler process if we
2010	 * have lots of memory; that process will swap processes in.
2011	 */
2012	if (vm_pages_needed && !vm_page_count_min()) {
2013		vm_pages_needed = 0;
2014		wakeup(&cnt.v_free_count);
2015	}
2016}
2017
2018/*
2019 *	vm_page_free_toq:
2020 *
2021 *	Returns the given page to the free list,
2022 *	disassociating it with any VM object.
2023 *
2024 *	The object must be locked.  The page must be locked if it is managed.
2025 */
2026void
2027vm_page_free_toq(vm_page_t m)
2028{
2029
2030	if ((m->oflags & VPO_UNMANAGED) == 0) {
2031		vm_page_lock_assert(m, MA_OWNED);
2032		KASSERT(!pmap_page_is_mapped(m),
2033		    ("vm_page_free_toq: freeing mapped page %p", m));
2034	}
2035	PCPU_INC(cnt.v_tfree);
2036
2037	if (VM_PAGE_IS_FREE(m))
2038		panic("vm_page_free: freeing free page %p", m);
2039	else if (m->busy != 0)
2040		panic("vm_page_free: freeing busy page %p", m);
2041
2042	/*
2043	 * Unqueue, then remove page.  Note that we cannot destroy
2044	 * the page here because we do not want to call the pager's
2045	 * callback routine until after we've put the page on the
2046	 * appropriate free queue.
2047	 */
2048	if ((m->oflags & VPO_UNMANAGED) == 0)
2049		vm_pageq_remove(m);
2050	vm_page_remove(m);
2051
2052	/*
2053	 * If the page is fictitious, remove the object association and
2054	 * return; otherwise, delay removing the object association.
2055	 */
2056	if ((m->flags & PG_FICTITIOUS) != 0) {
2057		return;
2058	}
2059
2060	m->valid = 0;
2061	vm_page_undirty(m);
2062
2063	if (m->wire_count != 0)
2064		panic("vm_page_free: freeing wired page %p", m);
2065	if (m->hold_count != 0) {
2066		m->flags &= ~PG_ZERO;
2067		vm_page_lock_queues();
2068		vm_page_enqueue(PQ_HOLD, m);
2069		vm_page_unlock_queues();
2070	} else {
2071		/*
2072		 * Restore the default memory attribute to the page.
2073		 */
2074		if (pmap_page_get_memattr(m) != VM_MEMATTR_DEFAULT)
2075			pmap_page_set_memattr(m, VM_MEMATTR_DEFAULT);
2076
2077		/*
2078		 * Insert the page into the physical memory allocator's
2079		 * cache/free page queues.
2080		 */
2081		mtx_lock(&vm_page_queue_free_mtx);
2082		m->flags |= PG_FREE;
2083		cnt.v_free_count++;
2084#if VM_NRESERVLEVEL > 0
2085		if (!vm_reserv_free_page(m))
2086#else
2087		if (TRUE)
2088#endif
2089			vm_phys_free_pages(m, 0);
2090		if ((m->flags & PG_ZERO) != 0)
2091			++vm_page_zero_count;
2092		else
2093			vm_page_zero_idle_wakeup();
2094		vm_page_free_wakeup();
2095		mtx_unlock(&vm_page_queue_free_mtx);
2096	}
2097}
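
/*
 * A sketch of the common free path (hypothetical caller): most code
 * reaches vm_page_free_toq() through the vm_page_free() wrapper while
 * holding the page lock:
 *
 *	vm_page_lock(m);
 *	vm_page_free(m);
 *	vm_page_unlock(m);
 */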
2098
2099/*
2100 *	vm_page_wire:
2101 *
2102 *	Mark this page as wired down by yet
2103 *	another map, removing it from paging queues
2104 *	as necessary.
2105 *
2106 *	If the page is fictitious, then its wire count must remain one.
2107 *
2108 *	The page must be locked.
2109 */
2110void
2111vm_page_wire(vm_page_t m)
2112{
2113
2114	/*
2115	 * Only bump the wire statistics if the page is not already wired,
2116	 * and only unqueue the page if it is on some queue (if it is unmanaged
2117	 * it is already off the queues).
2118	 */
2119	vm_page_lock_assert(m, MA_OWNED);
2120	if ((m->flags & PG_FICTITIOUS) != 0) {
2121		KASSERT(m->wire_count == 1,
2122		    ("vm_page_wire: fictitious page %p's wire count isn't one",
2123		    m));
2124		return;
2125	}
2126	if (m->wire_count == 0) {
2127		if ((m->oflags & VPO_UNMANAGED) == 0)
2128			vm_pageq_remove(m);
2129		atomic_add_int(&cnt.v_wire_count, 1);
2130	}
2131	m->wire_count++;
2132	KASSERT(m->wire_count != 0, ("vm_page_wire: wire_count overflow m=%p", m));
2133}
2134
2135/*
2136 * vm_page_unwire:
2137 *
2138 * Release one wiring of the specified page, potentially enabling it to be
2139 * paged again.  If paging is enabled, then the value of the parameter
2140 * "activate" determines to which queue the page is added.  If "activate" is
2141 * non-zero, then the page is added to the active queue.  Otherwise, it is
2142 * added to the inactive queue.
2143 *
2144 * However, unless the page belongs to an object, it is not enqueued because
2145 * it cannot be paged out.
2146 *
2147 * If a page is fictitious, then its wire count must always be one.
2148 *
2149 * A managed page must be locked.
2150 */
2151void
2152vm_page_unwire(vm_page_t m, int activate)
2153{
2154
2155	if ((m->oflags & VPO_UNMANAGED) == 0)
2156		vm_page_lock_assert(m, MA_OWNED);
2157	if ((m->flags & PG_FICTITIOUS) != 0) {
2158		KASSERT(m->wire_count == 1,
2159	    ("vm_page_unwire: fictitious page %p's wire count isn't one", m));
2160		return;
2161	}
2162	if (m->wire_count > 0) {
2163		m->wire_count--;
2164		if (m->wire_count == 0) {
2165			atomic_subtract_int(&cnt.v_wire_count, 1);
2166			if ((m->oflags & VPO_UNMANAGED) != 0 ||
2167			    m->object == NULL)
2168				return;
2169			if (!activate)
2170				m->flags &= ~PG_WINATCFLS;
2171			vm_page_lock_queues();
2172			vm_page_enqueue(activate ? PQ_ACTIVE : PQ_INACTIVE, m);
2173			vm_page_unlock_queues();
2174		}
2175	} else
2176		panic("vm_page_unwire: page %p's wire count is zero", m);
2177}
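
/*
 * An illustrative wire/unwire pairing (hypothetical): code that must
 * keep a managed page resident across a blocking operation wires it,
 * performs the operation, and then unwires it back onto a paging
 * queue:
 *
 *	vm_page_lock(m);
 *	vm_page_wire(m);
 *	vm_page_unlock(m);
 *	(... operation that may sleep or fault ...)
 *	vm_page_lock(m);
 *	vm_page_unwire(m, 0);	(0: requeue as inactive)
 *	vm_page_unlock(m);
 */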
2178
2179/*
2180 * Move the specified page to the inactive queue.
2181 *
2182 * Many pages placed on the inactive queue should actually go
2183 * into the cache, but it is difficult to figure out which.  What
2184 * we do instead, if the inactive target is well met, is to put
2185 * clean pages at the head of the inactive queue instead of the tail.
2186 * This will cause them to be moved to the cache more quickly and
2187 * if not actively re-referenced, reclaimed more quickly.  If we just
2188 * stick these pages at the end of the inactive queue, heavy filesystem
2189 * meta-data accesses can cause an unnecessary paging load on memory-bound
2190 * processes.  This optimization causes one-time-use metadata to be
2191 * reused more quickly.
2192 *
2193 * Normally athead is 0, resulting in LRU operation.  athead is set
2194 * to 1 if we want this page to be 'as if it were placed in the cache',
2195 * except without unmapping it from the process address space.
2196 *
2197 * The page must be locked.
2198 */
2199static inline void
2200_vm_page_deactivate(vm_page_t m, int athead)
2201{
2202	int queue;
2203
2204	vm_page_lock_assert(m, MA_OWNED);
2205
2206	/*
2207	 * Ignore if already inactive.
2208	 */
2209	if ((queue = m->queue) == PQ_INACTIVE)
2210		return;
2211	if (m->wire_count == 0 && (m->oflags & VPO_UNMANAGED) == 0) {
2212		m->flags &= ~PG_WINATCFLS;
2213		vm_page_lock_queues();
2214		if (queue != PQ_NONE)
2215			vm_page_queue_remove(queue, m);
2216		if (athead)
2217			TAILQ_INSERT_HEAD(&vm_page_queues[PQ_INACTIVE].pl, m,
2218			    pageq);
2219		else
2220			TAILQ_INSERT_TAIL(&vm_page_queues[PQ_INACTIVE].pl, m,
2221			    pageq);
2222		m->queue = PQ_INACTIVE;
2223		cnt.v_inactive_count++;
2224		vm_page_unlock_queues();
2225	}
2226}
2227
2228/*
2229 * Move the specified page to the inactive queue.
2230 *
2231 * The page must be locked.
2232 */
2233void
2234vm_page_deactivate(vm_page_t m)
2235{
2236
2237	_vm_page_deactivate(m, 0);
2238}
2239
2240/*
2241 * vm_page_try_to_cache:
2242 *
2243 * Returns 0 on failure and 1 on success.
2244 */
2245int
2246vm_page_try_to_cache(vm_page_t m)
2247{
2248
2249	vm_page_lock_assert(m, MA_OWNED);
2250	VM_OBJECT_LOCK_ASSERT(m->object, MA_OWNED);
2251	if (m->dirty || m->hold_count || m->busy || m->wire_count ||
2252	    (m->oflags & (VPO_BUSY | VPO_UNMANAGED)) != 0)
2253		return (0);
2254	pmap_remove_all(m);
2255	if (m->dirty)
2256		return (0);
2257	vm_page_cache(m);
2258	return (1);
2259}
2260
2261/*
2262 * vm_page_try_to_free()
2263 *
2264 *	Attempt to free the page.  If we cannot free it, we do nothing.
2265 *	1 is returned on success, 0 on failure.
2266 */
2267int
2268vm_page_try_to_free(vm_page_t m)
2269{
2270
2271	vm_page_lock_assert(m, MA_OWNED);
2272	if (m->object != NULL)
2273		VM_OBJECT_LOCK_ASSERT(m->object, MA_OWNED);
2274	if (m->dirty || m->hold_count || m->busy || m->wire_count ||
2275	    (m->oflags & (VPO_BUSY | VPO_UNMANAGED)) != 0)
2276		return (0);
2277	pmap_remove_all(m);
2278	if (m->dirty)
2279		return (0);
2280	vm_page_free(m);
2281	return (1);
2282}
2283
2284/*
2285 * vm_page_cache
2286 *
2287 * Put the specified page onto the page cache queue (if appropriate).
2288 *
2289 * The object and page must be locked.
2290 */
2291void
2292vm_page_cache(vm_page_t m)
2293{
2294	vm_object_t object;
2295	vm_page_t next, prev, root;
2296
2297	vm_page_lock_assert(m, MA_OWNED);
2298	object = m->object;
2299	VM_OBJECT_LOCK_ASSERT(object, MA_OWNED);
2300	if ((m->oflags & (VPO_UNMANAGED | VPO_BUSY)) || m->busy ||
2301	    m->hold_count || m->wire_count)
2302		panic("vm_page_cache: attempting to cache busy page");
2303	pmap_remove_all(m);
2304	if (m->dirty != 0)
2305		panic("vm_page_cache: page %p is dirty", m);
2306	if (m->valid == 0 || object->type == OBJT_DEFAULT ||
2307	    (object->type == OBJT_SWAP &&
2308	    !vm_pager_has_page(object, m->pindex, NULL, NULL))) {
2309		/*
2310		 * Hypothesis: A cache-eligible page belonging to a
2311		 * default object or swap object but without a backing
2312		 * store must be zero filled.
2313		 */
2314		vm_page_free(m);
2315		return;
2316	}
2317	KASSERT((m->flags & PG_CACHED) == 0,
2318	    ("vm_page_cache: page %p is already cached", m));
2319	PCPU_INC(cnt.v_tcached);
2320
2321	/*
2322	 * Remove the page from the paging queues.
2323	 */
2324	vm_pageq_remove(m);
2325
2326	/*
2327	 * Remove the page from the object's collection of resident
2328	 * pages.
2329	 */
2330	if ((next = TAILQ_NEXT(m, listq)) != NULL && next->left == m) {
2331		/*
2332		 * Since the page's successor in the list is also its parent
2333		 * in the tree, its right subtree must be empty.
2334		 */
2335		next->left = m->left;
2336		KASSERT(m->right == NULL,
2337		    ("vm_page_cache: page %p has right child", m));
2338	} else if ((prev = TAILQ_PREV(m, pglist, listq)) != NULL &&
2339	    prev->right == m) {
2340		/*
2341		 * Since the page's predecessor in the list is also its parent
2342		 * in the tree, its left subtree must be empty.
2343		 */
2344		KASSERT(m->left == NULL,
2345		    ("vm_page_cache: page %p has left child", m));
2346		prev->right = m->right;
2347	} else {
2348		if (m != object->root)
2349			vm_page_splay(m->pindex, object->root);
2350		if (m->left == NULL)
2351			root = m->right;
2352		else if (m->right == NULL)
2353			root = m->left;
2354		else {
2355			/*
2356			 * Move the page's successor to the root, because
2357			 * pages are usually removed in ascending order.
2358			 */
2359			if (m->right != next)
2360				vm_page_splay(m->pindex, m->right);
2361			next->left = m->left;
2362			root = next;
2363		}
2364		object->root = root;
2365	}
2366	TAILQ_REMOVE(&object->memq, m, listq);
2367	object->resident_page_count--;
2368
2369	/*
2370	 * Restore the default memory attribute to the page.
2371	 */
2372	if (pmap_page_get_memattr(m) != VM_MEMATTR_DEFAULT)
2373		pmap_page_set_memattr(m, VM_MEMATTR_DEFAULT);
2374
2375	/*
2376	 * Insert the page into the object's collection of cached pages
2377	 * and the physical memory allocator's cache/free page queues.
2378	 */
2379	m->flags &= ~PG_ZERO;
2380	mtx_lock(&vm_page_queue_free_mtx);
2381	m->flags |= PG_CACHED;
2382	cnt.v_cache_count++;
2383	root = object->cache;
2384	if (root == NULL) {
2385		m->left = NULL;
2386		m->right = NULL;
2387	} else {
2388		root = vm_page_splay(m->pindex, root);
2389		if (m->pindex < root->pindex) {
2390			m->left = root->left;
2391			m->right = root;
2392			root->left = NULL;
2393		} else if (__predict_false(m->pindex == root->pindex))
2394			panic("vm_page_cache: offset already cached");
2395		else {
2396			m->right = root->right;
2397			m->left = root;
2398			root->right = NULL;
2399		}
2400	}
2401	object->cache = m;
2402#if VM_NRESERVLEVEL > 0
2403	if (!vm_reserv_free_page(m)) {
2404#else
2405	if (TRUE) {
2406#endif
2407		vm_phys_set_pool(VM_FREEPOOL_CACHE, m, 0);
2408		vm_phys_free_pages(m, 0);
2409	}
2410	vm_page_free_wakeup();
2411	mtx_unlock(&vm_page_queue_free_mtx);
2412
2413	/*
2414	 * Increment the vnode's hold count if this is the object's only
2415	 * cached page.  Decrement the vnode's hold count if this was
2416	 * the object's only resident page.
2417	 */
2418	if (object->type == OBJT_VNODE) {
2419		if (root == NULL && object->resident_page_count != 0)
2420			vhold(object->handle);
2421		else if (root != NULL && object->resident_page_count == 0)
2422			vdrop(object->handle);
2423	}
2424}
2425
2426/*
2427 * vm_page_dontneed
2428 *
2429 *	Cache, deactivate, or do nothing as appropriate.  This routine
2430 *	is typically used by madvise() MADV_DONTNEED.
2431 *
2432 *	Generally speaking we want to move the page into the cache so
2433 *	it gets reused quickly.  However, this can result in a silly syndrome
2434 *	due to the page recycling too quickly.  Small objects will not be
2435 *	fully cached.  On the other hand, if we move the page to the inactive
2436 *	queue we wind up with a problem whereby very large objects
2437 *	unnecessarily blow away our inactive and cache queues.
2438 *
2439 *	The solution is to move the pages based on a fixed weighting.  We
2440 *	either leave them alone, deactivate them, or move them to the cache,
2441 *	where moving them to the cache has the highest weighting.
2442 *	By forcing some pages into other queues we eventually force the
2443 *	system to balance the queues, potentially recovering other unrelated
2444 *	space from active.  The idea is to not force this to happen too
2445 *	often.
2446 *
2447 *	The object and page must be locked.
2448 */
2449void
2450vm_page_dontneed(vm_page_t m)
2451{
2452	int dnw;
2453	int head;
2454
2455	vm_page_lock_assert(m, MA_OWNED);
2456	VM_OBJECT_LOCK_ASSERT(m->object, MA_OWNED);
2457	dnw = PCPU_GET(dnweight);
2458	PCPU_INC(dnweight);
2459
2460	/*
2461	 * Occasionally leave the page alone.
2462	 */
2463	if ((dnw & 0x01F0) == 0 || m->queue == PQ_INACTIVE) {
2464		if (m->act_count >= ACT_INIT)
2465			--m->act_count;
2466		return;
2467	}
2468
2469	/*
2470	 * Clear any references to the page.  Otherwise, the page daemon will
2471	 * immediately reactivate the page.
2472	 *
2473	 * Perform the pmap_clear_reference() first.  Otherwise, a concurrent
2474	 * pmap operation, such as pmap_remove(), could clear a reference in
2475	 * the pmap and set PGA_REFERENCED on the page before the
2476	 * pmap_clear_reference() had completed.  Consequently, the page would
2477	 * appear referenced based upon an old reference that occurred before
2478	 * this function ran.
2479	 */
2480	pmap_clear_reference(m);
2481	vm_page_aflag_clear(m, PGA_REFERENCED);
2482
2483	if (m->dirty == 0 && pmap_is_modified(m))
2484		vm_page_dirty(m);
2485
2486	if (m->dirty || (dnw & 0x0070) == 0) {
2487		/*
2488		 * Deactivate the page 3 times out of 32.
2489		 */
2490		head = 0;
2491	} else {
2492		/*
2493		 * Cache the page 28 times out of every 32.  Note that
2494		 * the page is deactivated instead of cached, but placed
2495		 * at the head of the queue instead of the tail.
2496		 */
2497		head = 1;
2498	}
2499	_vm_page_deactivate(m, head);
2500}
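
/*
 * The weighting arithmetic above, made explicit: "dnw" increments once
 * per call, so out of every 512 calls, (dnw & 0x01F0) == 0 holds for
 * 16 (1/32: leave the page alone) and (dnw & 0x0070) == 0 holds for
 * 64, 16 of which overlap the leave-alone case, leaving 48 (3/32) tail
 * deactivations; the remaining 448 (28/32) are head deactivations.
 * Dirty pages are always deactivated to the tail regardless of the
 * weighting.
 */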
2501
2502/*
2503 * Grab a page, waiting until we are woken up due to the page
2504 * changing state.  We keep on waiting as long as the page continues
2505 * to be in the object.  If the page doesn't exist, first allocate it
2506 * and then conditionally zero it.
2507 *
2508 * The caller must always specify the VM_ALLOC_RETRY flag.  This is intended
2509 * to facilitate its eventual removal.
2510 *
2511 * This routine may sleep.
2512 *
2513 * The object must be locked on entry.  The lock will, however, be released
2514 * and reacquired if the routine sleeps.
2515 */
2516vm_page_t
2517vm_page_grab(vm_object_t object, vm_pindex_t pindex, int allocflags)
2518{
2519	vm_page_t m;
2520
2521	VM_OBJECT_LOCK_ASSERT(object, MA_OWNED);
2522	KASSERT((allocflags & VM_ALLOC_RETRY) != 0,
2523	    ("vm_page_grab: VM_ALLOC_RETRY is required"));
2524retrylookup:
2525	if ((m = vm_page_lookup(object, pindex)) != NULL) {
2526		if ((m->oflags & VPO_BUSY) != 0 ||
2527		    ((allocflags & VM_ALLOC_IGN_SBUSY) == 0 && m->busy != 0)) {
2528			/*
2529			 * Reference the page before unlocking and
2530			 * sleeping so that the page daemon is less
2531			 * likely to reclaim it.
2532			 */
2533			vm_page_aflag_set(m, PGA_REFERENCED);
2534			vm_page_sleep(m, "pgrbwt");
2535			goto retrylookup;
2536		} else {
2537			if ((allocflags & VM_ALLOC_WIRED) != 0) {
2538				vm_page_lock(m);
2539				vm_page_wire(m);
2540				vm_page_unlock(m);
2541			}
2542			if ((allocflags & VM_ALLOC_NOBUSY) == 0)
2543				vm_page_busy(m);
2544			return (m);
2545		}
2546	}
2547	m = vm_page_alloc(object, pindex, allocflags & ~(VM_ALLOC_RETRY |
2548	    VM_ALLOC_IGN_SBUSY));
2549	if (m == NULL) {
2550		VM_OBJECT_UNLOCK(object);
2551		VM_WAIT;
2552		VM_OBJECT_LOCK(object);
2553		goto retrylookup;
2554	} else if (m->valid != 0)
2555		return (m);
2556	if (allocflags & VM_ALLOC_ZERO && (m->flags & PG_ZERO) == 0)
2557		pmap_zero_page(m);
2558	return (m);
2559}
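
/*
 * A typical caller (illustrative sketch): the returned page is busied
 * unless VM_ALLOC_NOBUSY was passed, so the caller unbusies it with
 * vm_page_wakeup() when done:
 *
 *	VM_OBJECT_LOCK(object);
 *	m = vm_page_grab(object, pindex,
 *	    VM_ALLOC_NORMAL | VM_ALLOC_RETRY);
 *	(... initialize or inspect the page ...)
 *	vm_page_wakeup(m);
 *	VM_OBJECT_UNLOCK(object);
 */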
2560
2561/*
2562 * Mapping function for valid or dirty bits in a page.
2563 *
2564 * Inputs are required to range within a page.
2565 */
2566vm_page_bits_t
2567vm_page_bits(int base, int size)
2568{
2569	int first_bit;
2570	int last_bit;
2571
2572	KASSERT(
2573	    base + size <= PAGE_SIZE,
2574	    ("vm_page_bits: illegal base/size %d/%d", base, size)
2575	);
2576
2577	if (size == 0)		/* handle degenerate case */
2578		return (0);
2579
2580	first_bit = base >> DEV_BSHIFT;
2581	last_bit = (base + size - 1) >> DEV_BSHIFT;
2582
2583	return (((vm_page_bits_t)2 << last_bit) -
2584	    ((vm_page_bits_t)1 << first_bit));
2585}
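
/*
 * A worked example, assuming DEV_BSIZE == 512 (DEV_BSHIFT == 9):
 * vm_page_bits(512, 1024) covers bytes 512 through 1535, so
 * first_bit == 1 and last_bit == 2, and the result is
 * (2 << 2) - (1 << 1) == 6, i.e., bits 1 and 2 set.
 */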
2586
2587/*
2588 *	vm_page_set_valid_range:
2589 *
2590 *	Sets portions of a page valid.  The arguments are expected
2591 *	to be DEV_BSIZE aligned, but if they aren't, the bitmap is inclusive
2592 *	of any partial chunks touched by the range.  The invalid portion of
2593 *	such chunks will be zeroed.
2594 *
2595 *	(base + size) must be less than or equal to PAGE_SIZE.
2596 */
2597void
2598vm_page_set_valid_range(vm_page_t m, int base, int size)
2599{
2600	int endoff, frag;
2601
2602	VM_OBJECT_LOCK_ASSERT(m->object, MA_OWNED);
2603	if (size == 0)	/* handle degenerate case */
2604		return;
2605
2606	/*
2607	 * If the base is not DEV_BSIZE aligned and the valid
2608	 * bit is clear, we have to zero out a portion of the
2609	 * first block.
2610	 */
2611	if ((frag = base & ~(DEV_BSIZE - 1)) != base &&
2612	    (m->valid & (1 << (base >> DEV_BSHIFT))) == 0)
2613		pmap_zero_page_area(m, frag, base - frag);
2614
2615	/*
2616	 * If the ending offset is not DEV_BSIZE aligned and the
2617	 * valid bit is clear, we have to zero out a portion of
2618	 * the last block.
2619	 */
2620	endoff = base + size;
2621	if ((frag = endoff & ~(DEV_BSIZE - 1)) != endoff &&
2622	    (m->valid & (1 << (endoff >> DEV_BSHIFT))) == 0)
2623		pmap_zero_page_area(m, endoff,
2624		    DEV_BSIZE - (endoff & (DEV_BSIZE - 1)));
2625
2626	/*
2627	 * Assert that no previously invalid block that is now being validated
2628	 * is already dirty.
2629	 */
2630	KASSERT((~m->valid & vm_page_bits(base, size) & m->dirty) == 0,
2631	    ("vm_page_set_valid_range: page %p is dirty", m));
2632
2633	/*
2634	 * Set valid bits inclusive of any overlap.
2635	 */
2636	m->valid |= vm_page_bits(base, size);
2637}
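
/*
 * A worked example of the partial-block zeroing, assuming
 * DEV_BSIZE == 512: for vm_page_set_valid_range(m, 100, 200) with the
 * affected valid bit clear, bytes 0-99 (the head fragment) and bytes
 * 300-511 (the tail fragment) are zeroed, and then the single valid
 * bit covering bytes 0-511 is set.
 */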
2638
2639/*
2640 * Clear the given bits from the specified page's dirty field.
2641 */
2642static __inline void
2643vm_page_clear_dirty_mask(vm_page_t m, vm_page_bits_t pagebits)
2644{
2645	uintptr_t addr;
2646#if PAGE_SIZE < 16384
2647	int shift;
2648#endif
2649
2650	/*
2651	 * If the object is locked and the page is neither VPO_BUSY nor
2652	 * write mapped, then the page's dirty field cannot possibly be
2653	 * set by a concurrent pmap operation.
2654	 */
2655	VM_OBJECT_LOCK_ASSERT(m->object, MA_OWNED);
2656	if ((m->oflags & VPO_BUSY) == 0 && !pmap_page_is_write_mapped(m))
2657		m->dirty &= ~pagebits;
2658	else {
2659		/*
2660		 * The pmap layer can call vm_page_dirty() without
2661		 * holding a distinguished lock.  The combination of
2662		 * the object's lock and an atomic operation suffice
2663		 * to guarantee consistency of the page dirty field.
2664		 *
2665		 * For PAGE_SIZE == 32768 case, compiler already
2666		 * properly aligns the dirty field, so no forcible
2667		 * alignment is needed. Only require existence of
2668		 * atomic_clear_64 when page size is 32768.
2669		 */
2670		addr = (uintptr_t)&m->dirty;
2671#if PAGE_SIZE == 32768
2672		atomic_clear_64((uint64_t *)addr, pagebits);
2673#elif PAGE_SIZE == 16384
2674		atomic_clear_32((uint32_t *)addr, pagebits);
2675#else		/* PAGE_SIZE <= 8192 */
2676		/*
2677		 * Use a trick to perform a 32-bit atomic on the
2678		 * containing aligned word, to not depend on the existence
2679		 * of atomic_clear_{8, 16}.
2680		 */
2681		shift = addr & (sizeof(uint32_t) - 1);
2682#if BYTE_ORDER == BIG_ENDIAN
2683		shift = (sizeof(uint32_t) - sizeof(m->dirty) - shift) * NBBY;
2684#else
2685		shift *= NBBY;
2686#endif
2687		addr &= ~(sizeof(uint32_t) - 1);
2688		atomic_clear_32((uint32_t *)addr, pagebits << shift);
2689#endif		/* PAGE_SIZE */
2690	}
2691}
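
/*
 * A worked example of the sub-word atomic trick, assuming
 * PAGE_SIZE == 4096 (an 8-bit dirty field) on a little-endian machine:
 * if &m->dirty has byte offset 2 within its aligned 32-bit word, then
 * shift == 2 * NBBY == 16, addr is rounded down to that word, and
 * atomic_clear_32() clears "pagebits << 16", touching only the dirty
 * byte.
 */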
2692
2693/*
2694 *	vm_page_set_validclean:
2695 *
2696 *	Sets portions of a page valid and clean.  The arguments are expected
2697 *	to be DEV_BSIZE aligned, but if they aren't, the bitmap is inclusive
2698 *	of any partial chunks touched by the range.  The invalid portion of
2699 *	such chunks will be zeroed.
2700 *
2701 *	(base + size) must be less than or equal to PAGE_SIZE.
2702 */
2703void
2704vm_page_set_validclean(vm_page_t m, int base, int size)
2705{
2706	vm_page_bits_t oldvalid, pagebits;
2707	int endoff, frag;
2708
2709	VM_OBJECT_LOCK_ASSERT(m->object, MA_OWNED);
2710	if (size == 0)	/* handle degenerate case */
2711		return;
2712
2713	/*
2714	 * If the base is not DEV_BSIZE aligned and the valid
2715	 * bit is clear, we have to zero out a portion of the
2716	 * first block.
2717	 */
2718	if ((frag = base & ~(DEV_BSIZE - 1)) != base &&
2719	    (m->valid & ((vm_page_bits_t)1 << (base >> DEV_BSHIFT))) == 0)
2720		pmap_zero_page_area(m, frag, base - frag);
2721
2722	/*
2723	 * If the ending offset is not DEV_BSIZE aligned and the
2724	 * valid bit is clear, we have to zero out a portion of
2725	 * the last block.
2726	 */
2727	endoff = base + size;
2728	if ((frag = endoff & ~(DEV_BSIZE - 1)) != endoff &&
2729	    (m->valid & ((vm_page_bits_t)1 << (endoff >> DEV_BSHIFT))) == 0)
2730		pmap_zero_page_area(m, endoff,
2731		    DEV_BSIZE - (endoff & (DEV_BSIZE - 1)));
2732
2733	/*
2734	 * Set valid, clear dirty bits.  If validating the entire
2735	 * page we can safely clear the pmap modify bit.  We also
2736	 * use this opportunity to clear the VPO_NOSYNC flag.  If a process
2737	 * takes a write fault on a MAP_NOSYNC memory area the flag will
2738	 * be set again.
2739	 *
2740	 * We set valid bits inclusive of any overlap, but we can only
2741	 * clear dirty bits for DEV_BSIZE chunks that are fully within
2742	 * the range.
2743	 */
2744	oldvalid = m->valid;
2745	pagebits = vm_page_bits(base, size);
2746	m->valid |= pagebits;
2747#if 0	/* NOT YET */
2748	if ((frag = base & (DEV_BSIZE - 1)) != 0) {
2749		frag = DEV_BSIZE - frag;
2750		base += frag;
2751		size -= frag;
2752		if (size < 0)
2753			size = 0;
2754	}
2755	pagebits = vm_page_bits(base, size & (DEV_BSIZE - 1));
2756#endif
2757	if (base == 0 && size == PAGE_SIZE) {
2758		/*
2759		 * The page can only be modified within the pmap if it is
2760		 * mapped, and it can only be mapped if it was previously
2761		 * fully valid.
2762		 */
2763		if (oldvalid == VM_PAGE_BITS_ALL)
2764			/*
2765			 * Perform the pmap_clear_modify() first.  Otherwise,
2766			 * a concurrent pmap operation, such as
2767			 * pmap_protect(), could clear a modification in the
2768			 * pmap and set the dirty field on the page before
2769			 * pmap_clear_modify() had begun and after the dirty
2770			 * field was cleared here.
2771			 */
2772			pmap_clear_modify(m);
2773		m->dirty = 0;
2774		m->oflags &= ~VPO_NOSYNC;
2775	} else if (oldvalid != VM_PAGE_BITS_ALL)
2776		m->dirty &= ~pagebits;
2777	else
2778		vm_page_clear_dirty_mask(m, pagebits);
2779}
2780
2781void
2782vm_page_clear_dirty(vm_page_t m, int base, int size)
2783{
2784
2785	vm_page_clear_dirty_mask(m, vm_page_bits(base, size));
2786}
2787
2788/*
2789 *	vm_page_set_invalid:
2790 *
2791 *	Invalidates DEV_BSIZE'd chunks within a page.  Both the
2792 *	valid and dirty bits for the affected areas are cleared.
2793 */
2794void
2795vm_page_set_invalid(vm_page_t m, int base, int size)
2796{
2797	vm_page_bits_t bits;
2798
2799	VM_OBJECT_LOCK_ASSERT(m->object, MA_OWNED);
2800	KASSERT((m->oflags & VPO_BUSY) == 0,
2801	    ("vm_page_set_invalid: page %p is busy", m));
2802	bits = vm_page_bits(base, size);
2803	if (m->valid == VM_PAGE_BITS_ALL && bits != 0)
2804		pmap_remove_all(m);
2805	KASSERT(!pmap_page_is_mapped(m),
2806	    ("vm_page_set_invalid: page %p is mapped", m));
2807	m->valid &= ~bits;
2808	m->dirty &= ~bits;
2809}
2810
2811/*
2812 * vm_page_zero_invalid()
2813 *
2814 *	The kernel assumes that the invalid portions of a page contain
2815 *	garbage, but such pages can be mapped into memory by user code.
2816 *	When this occurs, we must zero out the non-valid portions of the
2817 *	page so user code sees what it expects.
2818 *
2819 *	Pages are most often semi-valid when the end of a file is mapped
2820 *	into memory and the file's size is not page aligned.
2821 */
2822void
2823vm_page_zero_invalid(vm_page_t m, boolean_t setvalid)
2824{
2825	int b;
2826	int i;
2827
2828	VM_OBJECT_LOCK_ASSERT(m->object, MA_OWNED);
2829	/*
2830	 * Scan the valid bits looking for invalid sections that
2831	 * must be zeroed.  Invalid sub-DEV_BSIZE'd areas (where the
2832	 * valid bit may be set) have already been zeroed by
2833	 * vm_page_set_validclean().
2834	 */
2835	for (b = i = 0; i <= PAGE_SIZE / DEV_BSIZE; ++i) {
2836		if (i == (PAGE_SIZE / DEV_BSIZE) ||
2837		    (m->valid & ((vm_page_bits_t)1 << i))) {
2838			if (i > b) {
2839				pmap_zero_page_area(m,
2840				    b << DEV_BSHIFT, (i - b) << DEV_BSHIFT);
2841			}
2842			b = i + 1;
2843		}
2844	}
2845
2846	/*
2847	 * setvalid is TRUE when we can safely set the zeroed areas
2848	 * as being valid.  We can do this if there are no cache consistency
2849	 * issues.  E.g., it is OK to do with UFS, but not OK to do with NFS.
2850	 */
2851	if (setvalid)
2852		m->valid = VM_PAGE_BITS_ALL;
2853}
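
/*
 * A worked example, assuming PAGE_SIZE == 4096 and DEV_BSIZE == 512:
 * if m->valid == 0x0f (only the first four blocks valid), the scan
 * advances "b" to 4, finds no further valid bits, and the final
 * sentinel pass (i == 8) zeroes the invalid run, bytes 2048 through
 * 4095.
 */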
2854
2855/*
2856 *	vm_page_is_valid:
2857 *
2858 *	Is (partial) page valid?  Note that the case where size == 0
2859 *	will return FALSE in the degenerate case where the page is
2860 *	entirely invalid, and TRUE otherwise.
2861 */
2862int
2863vm_page_is_valid(vm_page_t m, int base, int size)
2864{
2865	vm_page_bits_t bits;
2866
2867	VM_OBJECT_LOCK_ASSERT(m->object, MA_OWNED);
2868	bits = vm_page_bits(base, size);
2869	if (m->valid && ((m->valid & bits) == bits))
2870		return (1);
2871	else
2872		return (0);
2873}
2874
2875/*
2876 * Set the page's dirty bits if the page is modified.
2877 */
2878void
2879vm_page_test_dirty(vm_page_t m)
2880{
2881
2882	VM_OBJECT_LOCK_ASSERT(m->object, MA_OWNED);
2883	if (m->dirty != VM_PAGE_BITS_ALL && pmap_is_modified(m))
2884		vm_page_dirty(m);
2885}
2886
2887void
2888vm_page_lock_KBI(vm_page_t m, const char *file, int line)
2889{
2890
2891	mtx_lock_flags_(vm_page_lockptr(m), 0, file, line);
2892}
2893
2894void
2895vm_page_unlock_KBI(vm_page_t m, const char *file, int line)
2896{
2897
2898	mtx_unlock_flags_(vm_page_lockptr(m), 0, file, line);
2899}
2900
2901int
2902vm_page_trylock_KBI(vm_page_t m, const char *file, int line)
2903{
2904
2905	return (mtx_trylock_flags_(vm_page_lockptr(m), 0, file, line));
2906}
2907
2908#if defined(INVARIANTS) || defined(INVARIANT_SUPPORT)
2909void
2910vm_page_lock_assert_KBI(vm_page_t m, int a, const char *file, int line)
2911{
2912
2913	mtx_assert_(vm_page_lockptr(m), a, file, line);
2914}
2915#endif
2916
2917int so_zerocp_fullpage = 0;
2918
2919/*
2920 *	Replace the given page with a copy.  The copied page assumes
2921 *	the portion of the given page's "wire_count" that is not the
2922 *	responsibility of this copy-on-write mechanism.
2923 *
2924 *	The object containing the given page must have a non-zero
2925 *	paging-in-progress count and be locked.
2926 */
2927void
2928vm_page_cowfault(vm_page_t m)
2929{
2930	vm_page_t mnew;
2931	vm_object_t object;
2932	vm_pindex_t pindex;
2933
2934	vm_page_lock_assert(m, MA_OWNED);
2935	object = m->object;
2936	VM_OBJECT_LOCK_ASSERT(object, MA_OWNED);
2937	KASSERT(object->paging_in_progress != 0,
2938	    ("vm_page_cowfault: object %p's paging-in-progress count is zero.",
2939	    object));
2940	pindex = m->pindex;
2941
2942 retry_alloc:
2943	pmap_remove_all(m);
2944	vm_page_remove(m);
2945	mnew = vm_page_alloc(object, pindex, VM_ALLOC_NORMAL | VM_ALLOC_NOBUSY);
2946	if (mnew == NULL) {
2947		vm_page_insert(m, object, pindex);
2948		vm_page_unlock(m);
2949		VM_OBJECT_UNLOCK(object);
2950		VM_WAIT;
2951		VM_OBJECT_LOCK(object);
2952		if (m == vm_page_lookup(object, pindex)) {
2953			vm_page_lock(m);
2954			goto retry_alloc;
2955		} else {
2956			/*
2957			 * Page disappeared during the wait.
2958			 */
2959			return;
2960		}
2961	}
2962
2963	if (m->cow == 0) {
2964		/*
2965		 * Check to see if we raced with an xmit complete when
2966		 * waiting to allocate a page.  If so, put things back
2967		 * the way they were.
2968		 */
2969		vm_page_unlock(m);
2970		vm_page_lock(mnew);
2971		vm_page_free(mnew);
2972		vm_page_unlock(mnew);
2973		vm_page_insert(m, object, pindex);
2974	} else { /* clear COW & copy page */
2975		if (!so_zerocp_fullpage)
2976			pmap_copy_page(m, mnew);
2977		mnew->valid = VM_PAGE_BITS_ALL;
2978		vm_page_dirty(mnew);
2979		mnew->wire_count = m->wire_count - m->cow;
2980		m->wire_count = m->cow;
2981		vm_page_unlock(m);
2982	}
2983}
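
/*
 * The wire accounting above, by example (illustrative numbers): if the
 * original page has wire_count == 3 with cow == 2 (two outstanding
 * zero-copy transmissions), the freshly allocated copy takes over
 * wire_count 3 - 2 == 1 on behalf of the mapping, while the original
 * keeps wire_count 2 until the outstanding transmissions release their
 * references.
 */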
2984
2985void
2986vm_page_cowclear(vm_page_t m)
2987{
2988
2989	vm_page_lock_assert(m, MA_OWNED);
2990	if (m->cow) {
2991		m->cow--;
2992		/*
2993		 * Let vm_fault add back write permission lazily.
2994		 */
2995	}
2996	/*
2997	 * sf_buf_free() will free the page, so we needn't do it here.
2998	 */
2999}
3000
3001int
3002vm_page_cowsetup(vm_page_t m)
3003{
3004
3005	vm_page_lock_assert(m, MA_OWNED);
3006	if ((m->flags & PG_FICTITIOUS) != 0 ||
3007	    (m->oflags & VPO_UNMANAGED) != 0 ||
3008	    m->cow == USHRT_MAX - 1 || !VM_OBJECT_TRYLOCK(m->object))
3009		return (EBUSY);
3010	m->cow++;
3011	pmap_remove_write(m);
3012	VM_OBJECT_UNLOCK(m->object);
3013	return (0);
3014}
3015
3016#ifdef INVARIANTS
3017void
3018vm_page_object_lock_assert(vm_page_t m)
3019{
3020
3021	/*
3022	 * Certain of the page's fields may only be modified by the
3023	 * holder of the containing object's lock or the setter of the
3024	 * page's VPO_BUSY flag.  Unfortunately, the setter of the
3025	 * VPO_BUSY flag is not recorded, and thus cannot be checked
3026	 * here.
3027	 */
3028	if (m->object != NULL && (m->oflags & VPO_BUSY) == 0)
3029		VM_OBJECT_LOCK_ASSERT(m->object, MA_OWNED);
3030}
3031#endif
3032
3033#include "opt_ddb.h"
3034#ifdef DDB
3035#include <sys/kernel.h>
3036
3037#include <ddb/ddb.h>
3038
3039DB_SHOW_COMMAND(page, vm_page_print_page_info)
3040{
3041	db_printf("cnt.v_free_count: %d\n", cnt.v_free_count);
3042	db_printf("cnt.v_cache_count: %d\n", cnt.v_cache_count);
3043	db_printf("cnt.v_inactive_count: %d\n", cnt.v_inactive_count);
3044	db_printf("cnt.v_active_count: %d\n", cnt.v_active_count);
3045	db_printf("cnt.v_wire_count: %d\n", cnt.v_wire_count);
3046	db_printf("cnt.v_free_reserved: %d\n", cnt.v_free_reserved);
3047	db_printf("cnt.v_free_min: %d\n", cnt.v_free_min);
3048	db_printf("cnt.v_free_target: %d\n", cnt.v_free_target);
3049	db_printf("cnt.v_cache_min: %d\n", cnt.v_cache_min);
3050	db_printf("cnt.v_inactive_target: %d\n", cnt.v_inactive_target);
3051}
3052
3053DB_SHOW_COMMAND(pageq, vm_page_print_pageq_info)
3054{
3055
3056	db_printf("PQ_FREE:");
3057	db_printf(" %d", cnt.v_free_count);
3058	db_printf("\n");
3059
3060	db_printf("PQ_CACHE:");
3061	db_printf(" %d", cnt.v_cache_count);
3062	db_printf("\n");
3063
3064	db_printf("PQ_ACTIVE: %d, PQ_INACTIVE: %d\n",
3065		*vm_page_queues[PQ_ACTIVE].cnt,
3066		*vm_page_queues[PQ_INACTIVE].cnt);
3067}
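
/*
 * Illustrative use (a sketch): DB_SHOW_COMMAND registers these under
 * the debugger's "show" table, so typing "show page" at the ddb prompt
 * prints the counters above and "show pageq" prints the per-queue
 * counts.
 */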
3068#endif /* DDB */
3069