/*-
 * Copyright (c) 1991 Regents of the University of California.
 * All rights reserved.
 * Copyright (c) 1998 Matthew Dillon.  All Rights Reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * The Mach Operating System project at Carnegie-Mellon University.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	from: @(#)vm_page.c	7.4 (Berkeley) 5/7/91
 */

/*-
 * Copyright (c) 1987, 1990 Carnegie-Mellon University.
 * All rights reserved.
 *
 * Authors: Avadis Tevanian, Jr., Michael Wayne Young
 *
 * Permission to use, copy, modify and distribute this software and
 * its documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND
 * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie the
 * rights to redistribute these changes.
 */

/*
 *			GENERAL RULES ON VM_PAGE MANIPULATION
 *
 *	- a pageq mutex is required when adding or removing a page from a
 *	  page queue (vm_page_queue[]), regardless of other mutexes or the
 *	  busy state of a page.
 *
 *	- a hash chain mutex is required when associating or disassociating
 *	  a page from the VM PAGE CACHE hash table (vm_page_buckets),
 *	  regardless of other mutexes or the busy state of a page.
 *
 *	- either a hash chain mutex OR a busied page is required in order
 *	  to modify the page flags.  A hash chain mutex must be obtained in
 *	  order to busy a page.  A page's flags cannot be modified by a
 *	  hash chain mutex if the page is marked busy.
 *
 *	- The object memq mutex is held when inserting or removing
 *	  pages from an object (vm_page_insert() or vm_page_remove()).  This
 *	  is different from the object's main mutex.
 *
 *	Generally speaking, you have to be aware of side effects when running
 *	vm_page ops.  A vm_page_lookup() will return with the hash chain
 *	locked, whether it was able to look up the page or not.  vm_page_free(),
 *	vm_page_cache(), vm_page_activate(), and a number of other routines
 *	will release the hash chain mutex for you.  Intermediate manipulation
 *	routines such as vm_page_flag_set() expect the hash chain to be held
 *	on entry and the hash chain will remain held on return.
 *
 *	pageq scanning can only occur with the pageq in question locked.
 *	We have a known bottleneck with the active queue, but the cache
 *	and free queues are actually arrays already.
 */
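
/*
 *	Illustrative sketch (not part of the original file): a typical
 *	caller observing the rules above when moving a page to the
 *	active queue.  "object" and "pindex" are hypothetical.
 *
 *		VM_OBJECT_LOCK(object);
 *		m = vm_page_lookup(object, pindex);
 *		if (m != NULL) {
 *			vm_page_lock(m);
 *			vm_page_activate(m);
 *			vm_page_unlock(m);
 *		}
 *		VM_OBJECT_UNLOCK(object);
 */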

/*
 *	Resident memory management module.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD: head/sys/vm/vm_page.c 208772 2010-06-03 10:11:45Z kib $");

#include "opt_vm.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/lock.h>
#include <sys/kernel.h>
#include <sys/limits.h>
#include <sys/malloc.h>
#include <sys/msgbuf.h>
#include <sys/mutex.h>
#include <sys/proc.h>
#include <sys/sysctl.h>
#include <sys/vmmeter.h>
#include <sys/vnode.h>

#include <vm/vm.h>
#include <vm/pmap.h>
#include <vm/vm_param.h>
#include <vm/vm_kern.h>
#include <vm/vm_object.h>
#include <vm/vm_page.h>
#include <vm/vm_pageout.h>
#include <vm/vm_pager.h>
#include <vm/vm_phys.h>
#include <vm/vm_reserv.h>
#include <vm/vm_extern.h>
#include <vm/uma.h>
#include <vm/uma_int.h>

#include <machine/md_var.h>

#if defined(__amd64__) || defined (__i386__)
extern struct sysctl_oid_list sysctl__vm_pmap_children;
#else
SYSCTL_NODE(_vm, OID_AUTO, pmap, CTLFLAG_RD, 0, "VM/pmap parameters");
#endif

static uint64_t pmap_tryrelock_calls;
SYSCTL_QUAD(_vm_pmap, OID_AUTO, tryrelock_calls, CTLFLAG_RD,
    &pmap_tryrelock_calls, 0, "Number of tryrelock calls");

static int pmap_tryrelock_restart;
SYSCTL_INT(_vm_pmap, OID_AUTO, tryrelock_restart, CTLFLAG_RD,
    &pmap_tryrelock_restart, 0, "Number of tryrelock restarts");

static int pmap_tryrelock_race;
SYSCTL_INT(_vm_pmap, OID_AUTO, tryrelock_race, CTLFLAG_RD,
    &pmap_tryrelock_race, 0, "Number of tryrelock pmap race cases");

/*
 *	Associated with each page of user-allocatable memory is a
 *	page structure.
 */

struct vpgqueues vm_page_queues[PQ_COUNT];
struct vpglocks vm_page_queue_lock;
struct vpglocks vm_page_queue_free_lock;

struct vpglocks	pa_lock[PA_LOCK_COUNT] __aligned(CACHE_LINE_SIZE);

vm_page_t vm_page_array = 0;
int vm_page_array_size = 0;
long first_page = 0;
int vm_page_zero_count = 0;

static int boot_pages = UMA_BOOT_PAGES;
TUNABLE_INT("vm.boot_pages", &boot_pages);
SYSCTL_INT(_vm, OID_AUTO, boot_pages, CTLFLAG_RD, &boot_pages, 0,
	"number of pages allocated for bootstrapping the VM system");

static void vm_page_clear_dirty_mask(vm_page_t m, int pagebits);
static void vm_page_queue_remove(int queue, vm_page_t m);
static void vm_page_enqueue(int queue, vm_page_t m);

/* Make sure that u_long is at least 64 bits when PAGE_SIZE is 32K. */
#if PAGE_SIZE == 32768
#ifdef CTASSERT
CTASSERT(sizeof(u_long) >= 8);
#endif
#endif

/*
 * Try to acquire a physical address lock while a pmap is locked.  If we
 * fail to trylock we unlock and lock the pmap directly and cache the
 * locked pa in *locked.  The caller should then restart their loop in case
 * the virtual to physical mapping has changed.
 */
int
vm_page_pa_tryrelock(pmap_t pmap, vm_paddr_t pa, vm_paddr_t *locked)
{
	vm_paddr_t lockpa;
	uint32_t gen_count;

	gen_count = pmap->pm_gen_count;
	atomic_add_long((volatile long *)&pmap_tryrelock_calls, 1);
	lockpa = *locked;
	*locked = pa;
	if (lockpa) {
		PA_LOCK_ASSERT(lockpa, MA_OWNED);
		if (PA_LOCKPTR(pa) == PA_LOCKPTR(lockpa))
			return (0);
		PA_UNLOCK(lockpa);
	}
	if (PA_TRYLOCK(pa))
		return (0);
	PMAP_UNLOCK(pmap);
	atomic_add_int((volatile int *)&pmap_tryrelock_restart, 1);
	PA_LOCK(pa);
	PMAP_LOCK(pmap);

	if (pmap->pm_gen_count != gen_count + 1) {
		pmap->pm_retries++;
		atomic_add_int((volatile int *)&pmap_tryrelock_race, 1);
		return (EAGAIN);
	}
	return (0);
}
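
/*
 *	Illustrative caller sketch (hypothetical; not from the original
 *	file): a loop that restarts whenever vm_page_pa_tryrelock()
 *	reports that the pmap lock was dropped, since the virtual to
 *	physical mapping may have changed while it was not held.
 *
 *		vm_paddr_t locked_pa = 0;
 *
 *		PMAP_LOCK(pmap);
 *	retry:
 *		pa = ...physical address looked up under the pmap lock...;
 *		if (vm_page_pa_tryrelock(pmap, pa, &locked_pa))
 *			goto retry;
 *		...operate on the page at "pa" with its PA lock held...
 *		PA_UNLOCK(locked_pa);
 *		PMAP_UNLOCK(pmap);
 */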

/*
 *	vm_set_page_size:
 *
 *	Sets the page size, perhaps based upon the memory
 *	size.  Must be called before any use of page-size
 *	dependent functions.
 */
void
vm_set_page_size(void)
{
	if (cnt.v_page_size == 0)
		cnt.v_page_size = PAGE_SIZE;
	if (((cnt.v_page_size - 1) & cnt.v_page_size) != 0)
		panic("vm_set_page_size: page size not a power of two");
}
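
/*
 *	Worked example of the power-of-two test above: for a 4K page,
 *	(0x1000 - 1) & 0x1000 == 0x0FFF & 0x1000 == 0, so the check
 *	passes, whereas a bogus size such as 0x1800 would give
 *	0x17FF & 0x1800 == 0x1000 != 0 and panic.
 */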

/*
 *	vm_page_blacklist_lookup:
 *
 *	See if a physical address in this page has been listed
 *	in the blacklist tunable.  Entries in the tunable are
 *	separated by spaces or commas.  If an invalid integer is
 *	encountered then the rest of the string is skipped.
 */
static int
vm_page_blacklist_lookup(char *list, vm_paddr_t pa)
{
	vm_paddr_t bad;
	char *cp, *pos;

	for (pos = list; *pos != '\0'; pos = cp) {
		bad = strtoq(pos, &cp, 0);
		if (*cp != '\0') {
			if (*cp == ' ' || *cp == ',') {
				cp++;
				if (cp == pos)
					continue;
			} else
				break;
		}
		if (pa == trunc_page(bad))
			return (1);
	}
	return (0);
}
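
/*
 *	Example (illustrative, not from the original file): a loader
 *	tunable such as
 *
 *		vm.blacklist="0x12345000,0x23456000"
 *
 *	would cause the two pages containing those physical addresses
 *	to be withheld from the free lists at boot.
 */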

/*
 *	vm_page_startup:
 *
 *	Initializes the resident memory module.
 *
 *	Allocates memory for the page cells, and
 *	for the object/offset-to-page hash table headers.
 *	Each page cell is initialized and placed on the free list.
 */
vm_offset_t
vm_page_startup(vm_offset_t vaddr)
{
	vm_offset_t mapped;
	vm_paddr_t page_range;
	vm_paddr_t new_end;
	int i;
	vm_paddr_t pa;
	int nblocks;
	vm_paddr_t last_pa;
	char *list;

	/* the biggest memory array is the second group of pages */
	vm_paddr_t end;
	vm_paddr_t biggestsize;
	vm_paddr_t low_water, high_water;
	int biggestone;

	biggestsize = 0;
	biggestone = 0;
	nblocks = 0;
	vaddr = round_page(vaddr);

	for (i = 0; phys_avail[i + 1]; i += 2) {
		phys_avail[i] = round_page(phys_avail[i]);
		phys_avail[i + 1] = trunc_page(phys_avail[i + 1]);
	}

	low_water = phys_avail[0];
	high_water = phys_avail[1];

	for (i = 0; phys_avail[i + 1]; i += 2) {
		vm_paddr_t size = phys_avail[i + 1] - phys_avail[i];

		if (size > biggestsize) {
			biggestone = i;
			biggestsize = size;
		}
		if (phys_avail[i] < low_water)
			low_water = phys_avail[i];
		if (phys_avail[i + 1] > high_water)
			high_water = phys_avail[i + 1];
		++nblocks;
	}

#ifdef XEN
	low_water = 0;
#endif

	end = phys_avail[biggestone+1];

	/*
	 * Initialize the locks.
	 */
	mtx_init(&vm_page_queue_mtx, "vm page queue mutex", NULL, MTX_DEF |
	    MTX_RECURSE);
	mtx_init(&vm_page_queue_free_mtx, "vm page queue free mutex", NULL,
	    MTX_DEF);

	/* Setup page locks. */
	for (i = 0; i < PA_LOCK_COUNT; i++)
		mtx_init(&pa_lock[i].data, "page lock", NULL,
		    MTX_DEF | MTX_RECURSE | MTX_DUPOK);

	/*
	 * Initialize the queue headers for the hold queue, the active queue,
	 * and the inactive queue.
	 */
	for (i = 0; i < PQ_COUNT; i++)
		TAILQ_INIT(&vm_page_queues[i].pl);
	vm_page_queues[PQ_INACTIVE].cnt = &cnt.v_inactive_count;
	vm_page_queues[PQ_ACTIVE].cnt = &cnt.v_active_count;
	vm_page_queues[PQ_HOLD].cnt = &cnt.v_active_count;

	/*
	 * Allocate memory for use when boot strapping the kernel memory
	 * allocator.
	 */
	new_end = end - (boot_pages * UMA_SLAB_SIZE);
	new_end = trunc_page(new_end);
	mapped = pmap_map(&vaddr, new_end, end,
	    VM_PROT_READ | VM_PROT_WRITE);
	bzero((void *)mapped, end - new_end);
	uma_startup((void *)mapped, boot_pages);

#if defined(__amd64__) || defined(__i386__) || defined(__arm__)
	/*
	 * Allocate a bitmap to indicate that a random physical page
	 * needs to be included in a minidump.
	 *
	 * The amd64 port needs this to indicate which direct map pages
	 * need to be dumped, via calls to dump_add_page()/dump_drop_page().
	 *
	 * However, i386 still needs this workspace internally within the
	 * minidump code.  In theory, they are not needed on i386, but are
	 * included should the sf_buf code decide to use them.
	 */
	page_range = phys_avail[(nblocks - 1) * 2 + 1] / PAGE_SIZE;
	vm_page_dump_size = round_page(roundup2(page_range, NBBY) / NBBY);
	new_end -= vm_page_dump_size;
	vm_page_dump = (void *)(uintptr_t)pmap_map(&vaddr, new_end,
	    new_end + vm_page_dump_size, VM_PROT_READ | VM_PROT_WRITE);
	bzero((void *)vm_page_dump, vm_page_dump_size);
#endif
#ifdef __amd64__
	/*
	 * Request that the physical pages underlying the message buffer be
	 * included in a crash dump.  Since the message buffer is accessed
	 * through the direct map, they are not automatically included.
	 */
	pa = DMAP_TO_PHYS((vm_offset_t)msgbufp->msg_ptr);
	last_pa = pa + round_page(MSGBUF_SIZE);
	while (pa < last_pa) {
		dump_add_page(pa);
		pa += PAGE_SIZE;
	}
#endif
	/*
	 * Compute the number of pages of memory that will be available for
	 * use (taking into account the overhead of a page structure per
	 * page).
	 */
	first_page = low_water / PAGE_SIZE;
#ifdef VM_PHYSSEG_SPARSE
	page_range = 0;
	for (i = 0; phys_avail[i + 1] != 0; i += 2)
		page_range += atop(phys_avail[i + 1] - phys_avail[i]);
#elif defined(VM_PHYSSEG_DENSE)
	page_range = high_water / PAGE_SIZE - first_page;
#else
#error "Either VM_PHYSSEG_DENSE or VM_PHYSSEG_SPARSE must be defined."
#endif
	end = new_end;

	/*
	 * Reserve an unmapped guard page to trap access to vm_page_array[-1].
	 */
	vaddr += PAGE_SIZE;

	/*
	 * Initialize the mem entry structures now, and put them in the free
	 * queue.
	 */
	new_end = trunc_page(end - page_range * sizeof(struct vm_page));
	mapped = pmap_map(&vaddr, new_end, end,
	    VM_PROT_READ | VM_PROT_WRITE);
	vm_page_array = (vm_page_t) mapped;
#if VM_NRESERVLEVEL > 0
	/*
	 * Allocate memory for the reservation management system's data
	 * structures.
	 */
	new_end = vm_reserv_startup(&vaddr, new_end, high_water);
#endif
#ifdef __amd64__
	/*
	 * pmap_map on amd64 comes out of the direct-map, not kvm like i386,
	 * so the pages must be tracked for a crashdump to include this data.
	 * This includes the vm_page_array and the early UMA bootstrap pages.
	 */
	for (pa = new_end; pa < phys_avail[biggestone + 1]; pa += PAGE_SIZE)
		dump_add_page(pa);
#endif
	phys_avail[biggestone + 1] = new_end;

	/*
	 * Clear all of the page structures
	 */
	bzero((caddr_t) vm_page_array, page_range * sizeof(struct vm_page));
	for (i = 0; i < page_range; i++)
		vm_page_array[i].order = VM_NFREEORDER;
	vm_page_array_size = page_range;

	/*
	 * Initialize the physical memory allocator.
	 */
	vm_phys_init();

	/*
	 * Add every available physical page that is not blacklisted to
	 * the free lists.
	 */
	cnt.v_page_count = 0;
	cnt.v_free_count = 0;
	list = getenv("vm.blacklist");
	for (i = 0; phys_avail[i + 1] != 0; i += 2) {
		pa = phys_avail[i];
		last_pa = phys_avail[i + 1];
		while (pa < last_pa) {
			if (list != NULL &&
			    vm_page_blacklist_lookup(list, pa))
				printf("Skipping page with pa 0x%jx\n",
				    (uintmax_t)pa);
			else
				vm_phys_add_page(pa);
			pa += PAGE_SIZE;
		}
	}
	freeenv(list);
#if VM_NRESERVLEVEL > 0
	/*
	 * Initialize the reservation management system.
	 */
	vm_reserv_init();
#endif
	return (vaddr);
}

void
vm_page_flag_set(vm_page_t m, unsigned short bits)
{

	mtx_assert(&vm_page_queue_mtx, MA_OWNED);
	/*
	 * For a managed page, the PG_WRITEABLE flag can be set only if
	 * the page is VPO_BUSY.  Currently this flag is only set by
	 * pmap_enter().
	 */
	KASSERT((bits & PG_WRITEABLE) == 0 ||
	    (m->flags & (PG_UNMANAGED | PG_FICTITIOUS)) != 0 ||
	    (m->oflags & VPO_BUSY) != 0, ("PG_WRITEABLE and !VPO_BUSY"));
	m->flags |= bits;
}

void
vm_page_flag_clear(vm_page_t m, unsigned short bits)
{

	mtx_assert(&vm_page_queue_mtx, MA_OWNED);
	m->flags &= ~bits;
}

void
vm_page_busy(vm_page_t m)
{

	VM_OBJECT_LOCK_ASSERT(m->object, MA_OWNED);
	KASSERT((m->oflags & VPO_BUSY) == 0,
	    ("vm_page_busy: page already busy!!!"));
	m->oflags |= VPO_BUSY;
}

/*
 *      vm_page_flash:
 *
 *      wakeup anyone waiting for the page.
 */
void
vm_page_flash(vm_page_t m)
{

	VM_OBJECT_LOCK_ASSERT(m->object, MA_OWNED);
	if (m->oflags & VPO_WANTED) {
		m->oflags &= ~VPO_WANTED;
		wakeup(m);
	}
}

/*
 *      vm_page_wakeup:
 *
 *      clear the VPO_BUSY flag and wakeup anyone waiting for the
 *      page.
 *
 */
void
vm_page_wakeup(vm_page_t m)
{

	VM_OBJECT_LOCK_ASSERT(m->object, MA_OWNED);
	KASSERT(m->oflags & VPO_BUSY, ("vm_page_wakeup: page not busy!!!"));
	m->oflags &= ~VPO_BUSY;
	vm_page_flash(m);
}

void
vm_page_io_start(vm_page_t m)
{

	VM_OBJECT_LOCK_ASSERT(m->object, MA_OWNED);
	m->busy++;
}

void
vm_page_io_finish(vm_page_t m)
{

	VM_OBJECT_LOCK_ASSERT(m->object, MA_OWNED);
	m->busy--;
	if (m->busy == 0)
		vm_page_flash(m);
}

/*
 * Keep the page from being freed by the page daemon.  This has much
 * the same effect as wiring, except with much lower overhead, and it
 * should be used only for *very* temporary holding ("wiring").
 */
void
vm_page_hold(vm_page_t mem)
{

	vm_page_lock_assert(mem, MA_OWNED);
	mem->hold_count++;
}

void
vm_page_unhold(vm_page_t mem)
{

	vm_page_lock_assert(mem, MA_OWNED);
	--mem->hold_count;
	KASSERT(mem->hold_count >= 0, ("vm_page_unhold: hold count < 0!!!"));
	if (mem->hold_count == 0 && VM_PAGE_INQUEUE2(mem, PQ_HOLD))
		vm_page_free_toq(mem);
}
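
/*
 *	Illustrative usage sketch (hypothetical; not from the original
 *	file): holding a page across a short operation so that the page
 *	daemon cannot free it in the meantime.
 *
 *		vm_page_lock(m);
 *		vm_page_hold(m);
 *		vm_page_unlock(m);
 *		...brief access to the page's contents...
 *		vm_page_lock(m);
 *		vm_page_unhold(m);
 *		vm_page_unlock(m);
 */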

/*
 *	vm_page_free:
 *
 *	Free a page.
 */
void
vm_page_free(vm_page_t m)
{

	m->flags &= ~PG_ZERO;
	vm_page_free_toq(m);
}

/*
 *	vm_page_free_zero:
 *
 *	Free a page to the zeroed-pages queue.
 */
void
vm_page_free_zero(vm_page_t m)
{

	m->flags |= PG_ZERO;
	vm_page_free_toq(m);
}

/*
 *	vm_page_sleep:
 *
 *	Sleep and release the page and page queues locks.
 *
 *	The object containing the given page must be locked.
 */
void
vm_page_sleep(vm_page_t m, const char *msg)
{

	VM_OBJECT_LOCK_ASSERT(m->object, MA_OWNED);
	if (mtx_owned(&vm_page_queue_mtx))
		vm_page_unlock_queues();
	if (mtx_owned(vm_page_lockptr(m)))
		vm_page_unlock(m);

	/*
	 * It's possible that while we sleep, the page will get
	 * unbusied and freed.  If we are holding the object
	 * lock, we will assume we hold a reference to the object
	 * such that even if m->object changes, we can re-lock
	 * it.
	 */
	m->oflags |= VPO_WANTED;
	msleep(m, VM_OBJECT_MTX(m->object), PVM, msg, 0);
}

/*
 *	vm_page_dirty:
 *
 *	make page all dirty
 */
void
vm_page_dirty(vm_page_t m)
{

	KASSERT((m->flags & PG_CACHED) == 0,
	    ("vm_page_dirty: page in cache!"));
	KASSERT(!VM_PAGE_IS_FREE(m),
	    ("vm_page_dirty: page is free!"));
	KASSERT(m->valid == VM_PAGE_BITS_ALL,
	    ("vm_page_dirty: page is invalid!"));
	m->dirty = VM_PAGE_BITS_ALL;
}

/*
 *	vm_page_splay:
 *
 *	Implements Sleator and Tarjan's top-down splay algorithm.  Returns
 *	the vm_page containing the given pindex.  If, however, that
 *	pindex is not found in the vm_object, returns a vm_page that is
 *	adjacent to the pindex, coming before or after it.
 */
vm_page_t
vm_page_splay(vm_pindex_t pindex, vm_page_t root)
{
	struct vm_page dummy;
	vm_page_t lefttreemax, righttreemin, y;

	if (root == NULL)
		return (root);
	lefttreemax = righttreemin = &dummy;
	for (;; root = y) {
		if (pindex < root->pindex) {
			if ((y = root->left) == NULL)
				break;
			if (pindex < y->pindex) {
				/* Rotate right. */
				root->left = y->right;
				y->right = root;
				root = y;
				if ((y = root->left) == NULL)
					break;
			}
			/* Link into the new root's right tree. */
			righttreemin->left = root;
			righttreemin = root;
		} else if (pindex > root->pindex) {
			if ((y = root->right) == NULL)
				break;
			if (pindex > y->pindex) {
				/* Rotate left. */
				root->right = y->left;
				y->left = root;
				root = y;
				if ((y = root->right) == NULL)
					break;
			}
			/* Link into the new root's left tree. */
			lefttreemax->right = root;
			lefttreemax = root;
		} else
			break;
	}
	/* Assemble the new root. */
	lefttreemax->right = root->left;
	righttreemin->left = root->right;
	root->left = dummy.right;
	root->right = dummy.left;
	return (root);
}

/*
 *	vm_page_insert:		[ internal use only ]
 *
 *	Inserts the given mem entry into the object and object list.
 *
 *	The pagetables are not updated but will presumably fault the page
 *	in if necessary, or if a kernel page the caller will at some point
 *	enter the page into the kernel's pmap.  We are not allowed to block
 *	here so we *can't* do this anyway.
 *
 *	The object and page must be locked.
 *	This routine may not block.
 */
void
vm_page_insert(vm_page_t m, vm_object_t object, vm_pindex_t pindex)
{
	vm_page_t root;

	VM_OBJECT_LOCK_ASSERT(object, MA_OWNED);
	if (m->object != NULL)
		panic("vm_page_insert: page already inserted");

	/*
	 * Record the object/offset pair in this page
	 */
	m->object = object;
	m->pindex = pindex;

	/*
	 * Now link into the object's ordered list of backed pages.
	 */
	root = object->root;
	if (root == NULL) {
		m->left = NULL;
		m->right = NULL;
		TAILQ_INSERT_TAIL(&object->memq, m, listq);
	} else {
		root = vm_page_splay(pindex, root);
		if (pindex < root->pindex) {
			m->left = root->left;
			m->right = root;
			root->left = NULL;
			TAILQ_INSERT_BEFORE(root, m, listq);
		} else if (pindex == root->pindex)
			panic("vm_page_insert: offset already allocated");
		else {
			m->right = root->right;
			m->left = root;
			root->right = NULL;
			TAILQ_INSERT_AFTER(&object->memq, root, m, listq);
		}
	}
	object->root = m;
	object->generation++;

	/*
	 * show that the object has one more resident page.
	 */
	object->resident_page_count++;
	/*
	 * Hold the vnode until the last page is released.
	 */
	if (object->resident_page_count == 1 && object->type == OBJT_VNODE)
		vhold((struct vnode *)object->handle);

	/*
	 * Since we are inserting a new and possibly dirty page,
	 * update the object's OBJ_MIGHTBEDIRTY flag.
	 */
	if (m->flags & PG_WRITEABLE)
		vm_object_set_writeable_dirty(object);
}

/*
 *	vm_page_remove:
 *				NOTE: used by device pager as well -wfj
 *
 *	Removes the given mem entry from the object/offset-page
 *	table and the object page list, but does not invalidate/terminate
 *	the backing store.
 *
 *	The object and page must be locked.
 *	The underlying pmap entry (if any) is NOT removed here.
 *	This routine may not block.
 */
void
vm_page_remove(vm_page_t m)
{
	vm_object_t object;
	vm_page_t root;

	if ((m->flags & PG_UNMANAGED) == 0)
		vm_page_lock_assert(m, MA_OWNED);
	if ((object = m->object) == NULL)
		return;
	VM_OBJECT_LOCK_ASSERT(object, MA_OWNED);
	if (m->oflags & VPO_BUSY) {
		m->oflags &= ~VPO_BUSY;
		vm_page_flash(m);
	}

	/*
	 * Now remove from the object's list of backed pages.
	 */
	if (m != object->root)
		vm_page_splay(m->pindex, object->root);
	if (m->left == NULL)
		root = m->right;
	else {
		root = vm_page_splay(m->pindex, m->left);
		root->right = m->right;
	}
	object->root = root;
	TAILQ_REMOVE(&object->memq, m, listq);

	/*
	 * And show that the object has one fewer resident page.
	 */
	object->resident_page_count--;
	object->generation++;
	/*
	 * The vnode may now be recycled.
	 */
	if (object->resident_page_count == 0 && object->type == OBJT_VNODE)
		vdrop((struct vnode *)object->handle);

	m->object = NULL;
}

/*
 *	vm_page_lookup:
 *
 *	Returns the page associated with the object/offset
 *	pair specified; if none is found, NULL is returned.
 *
 *	The object must be locked.
 *	This routine may not block.
 *	This is a critical path routine
 */
vm_page_t
vm_page_lookup(vm_object_t object, vm_pindex_t pindex)
{
	vm_page_t m;

	VM_OBJECT_LOCK_ASSERT(object, MA_OWNED);
	if ((m = object->root) != NULL && m->pindex != pindex) {
		m = vm_page_splay(pindex, m);
		if ((object->root = m)->pindex != pindex)
			m = NULL;
	}
	return (m);
}

/*
 *	vm_page_rename:
 *
 *	Move the given memory entry from its
 *	current object to the specified target object/offset.
 *
 *	The object must be locked.
 *	This routine may not block.
 *
 *	Note: swap associated with the page must be invalidated by the move.  We
 *	      have to do this for several reasons:  (1) we aren't freeing the
 *	      page, (2) we are dirtying the page, (3) the VM system is probably
 *	      moving the page from object A to B, and will then later move
 *	      the backing store from A to B and we can't have a conflict.
 *
 *	Note: we *always* dirty the page.  It is necessary both for the
 *	      fact that we moved it, and because we may be invalidating
 *	      swap.  If the page is on the cache, we have to deactivate it
 *	      or vm_page_dirty() will panic.  Dirty pages are not allowed
 *	      on the cache.
 */
void
vm_page_rename(vm_page_t m, vm_object_t new_object, vm_pindex_t new_pindex)
{

	vm_page_remove(m);
	vm_page_insert(m, new_object, new_pindex);
	vm_page_dirty(m);
}

/*
 *	Convert all of the given object's cached pages that have a
 *	pindex within the given range into free pages.  If the value
 *	zero is given for "end", then the range's upper bound is
 *	infinity.  If the given object is backed by a vnode and it
 *	transitions from having one or more cached pages to none, the
 *	vnode's hold count is reduced.
 */
void
vm_page_cache_free(vm_object_t object, vm_pindex_t start, vm_pindex_t end)
{
	vm_page_t m, m_next;
	boolean_t empty;

	mtx_lock(&vm_page_queue_free_mtx);
	if (__predict_false(object->cache == NULL)) {
		mtx_unlock(&vm_page_queue_free_mtx);
		return;
	}
	m = object->cache = vm_page_splay(start, object->cache);
	if (m->pindex < start) {
		if (m->right == NULL)
			m = NULL;
		else {
			m_next = vm_page_splay(start, m->right);
			m_next->left = m;
			m->right = NULL;
			m = object->cache = m_next;
		}
	}

	/*
	 * At this point, "m" is either (1) a reference to the page
	 * with the least pindex that is greater than or equal to
	 * "start" or (2) NULL.
	 */
	for (; m != NULL && (m->pindex < end || end == 0); m = m_next) {
		/*
		 * Find "m"'s successor and remove "m" from the
		 * object's cache.
		 */
		if (m->right == NULL) {
			object->cache = m->left;
			m_next = NULL;
		} else {
			m_next = vm_page_splay(start, m->right);
			m_next->left = m->left;
			object->cache = m_next;
		}
		/* Convert "m" to a free page. */
		m->object = NULL;
		m->valid = 0;
		/* Clear PG_CACHED and set PG_FREE. */
		m->flags ^= PG_CACHED | PG_FREE;
		KASSERT((m->flags & (PG_CACHED | PG_FREE)) == PG_FREE,
		    ("vm_page_cache_free: page %p has inconsistent flags", m));
		cnt.v_cache_count--;
		cnt.v_free_count++;
	}
	empty = object->cache == NULL;
	mtx_unlock(&vm_page_queue_free_mtx);
	if (object->type == OBJT_VNODE && empty)
		vdrop(object->handle);
}

/*
 *	Returns the cached page that is associated with the given
 *	object and offset.  If, however, none exists, returns NULL.
 *
 *	The free page queue must be locked.
 */
static inline vm_page_t
vm_page_cache_lookup(vm_object_t object, vm_pindex_t pindex)
{
	vm_page_t m;

	mtx_assert(&vm_page_queue_free_mtx, MA_OWNED);
	if ((m = object->cache) != NULL && m->pindex != pindex) {
		m = vm_page_splay(pindex, m);
		if ((object->cache = m)->pindex != pindex)
			m = NULL;
	}
	return (m);
}

/*
 *	Remove the given cached page from its containing object's
 *	collection of cached pages.
 *
 *	The free page queue must be locked.
 */
void
vm_page_cache_remove(vm_page_t m)
{
	vm_object_t object;
	vm_page_t root;

	mtx_assert(&vm_page_queue_free_mtx, MA_OWNED);
	KASSERT((m->flags & PG_CACHED) != 0,
	    ("vm_page_cache_remove: page %p is not cached", m));
	object = m->object;
	if (m != object->cache) {
		root = vm_page_splay(m->pindex, object->cache);
		KASSERT(root == m,
		    ("vm_page_cache_remove: page %p is not cached in object %p",
		    m, object));
	}
	if (m->left == NULL)
		root = m->right;
	else if (m->right == NULL)
		root = m->left;
	else {
		root = vm_page_splay(m->pindex, m->left);
		root->right = m->right;
	}
	object->cache = root;
	m->object = NULL;
	cnt.v_cache_count--;
}

/*
 *	Transfer all of the cached pages with offset greater than or
 *	equal to 'offidxstart' from the original object's cache to the
 *	new object's cache.  However, any cached pages with offset
 *	greater than or equal to the new object's size are kept in the
 *	original object.  Initially, the new object's cache must be
 *	empty.  Offset 'offidxstart' in the original object must
 *	correspond to offset zero in the new object.
 *
 *	The new object must be locked.
 */
void
vm_page_cache_transfer(vm_object_t orig_object, vm_pindex_t offidxstart,
    vm_object_t new_object)
{
	vm_page_t m, m_next;

	/*
	 * Insertion into an object's collection of cached pages
	 * requires the object to be locked.  In contrast, removal does
	 * not.
	 */
	VM_OBJECT_LOCK_ASSERT(new_object, MA_OWNED);
	KASSERT(new_object->cache == NULL,
	    ("vm_page_cache_transfer: object %p has cached pages",
	    new_object));
	mtx_lock(&vm_page_queue_free_mtx);
	if ((m = orig_object->cache) != NULL) {
		/*
		 * Transfer all of the pages with offset greater than or
		 * equal to 'offidxstart' from the original object's
		 * cache to the new object's cache.
		 */
		m = vm_page_splay(offidxstart, m);
		if (m->pindex < offidxstart) {
			orig_object->cache = m;
			new_object->cache = m->right;
			m->right = NULL;
		} else {
			orig_object->cache = m->left;
			new_object->cache = m;
			m->left = NULL;
		}
		while ((m = new_object->cache) != NULL) {
			if ((m->pindex - offidxstart) >= new_object->size) {
				/*
				 * Return all of the cached pages with
				 * offset greater than or equal to the
				 * new object's size to the original
				 * object's cache.
				 */
				new_object->cache = m->left;
				m->left = orig_object->cache;
				orig_object->cache = m;
				break;
			}
			m_next = vm_page_splay(m->pindex, m->right);
			/* Update the page's object and offset. */
			m->object = new_object;
			m->pindex -= offidxstart;
			if (m_next == NULL)
				break;
			m->right = NULL;
			m_next->left = m;
			new_object->cache = m_next;
		}
		KASSERT(new_object->cache == NULL ||
		    new_object->type == OBJT_SWAP,
		    ("vm_page_cache_transfer: object %p's type is incompatible"
		    " with cached pages", new_object));
	}
	mtx_unlock(&vm_page_queue_free_mtx);
}

/*
 *	vm_page_alloc:
 *
 *	Allocate and return a memory cell associated
 *	with this VM object/offset pair.
 *
 *	page_req classes:
 *	VM_ALLOC_NORMAL		normal process request
 *	VM_ALLOC_SYSTEM		system *really* needs a page
 *	VM_ALLOC_INTERRUPT	interrupt time request
 *	VM_ALLOC_ZERO		zero page
 *	VM_ALLOC_WIRED		wire the allocated page
 *	VM_ALLOC_NOOBJ		page is not associated with a vm object
 *	VM_ALLOC_NOBUSY		do not set the page busy
 *	VM_ALLOC_IFNOTCACHED	return NULL, do not reactivate if the page
 *				is cached
 *	VM_ALLOC_IFCACHED	return NULL, do not allocate a new page if the
 *				page is not cached
 *
 *	This routine may not sleep.
 */
vm_page_t
vm_page_alloc(vm_object_t object, vm_pindex_t pindex, int req)
{
	struct vnode *vp = NULL;
	vm_object_t m_object;
	vm_page_t m;
	int flags, page_req;

	page_req = req & VM_ALLOC_CLASS_MASK;
	KASSERT(curthread->td_intr_nesting_level == 0 ||
	    page_req == VM_ALLOC_INTERRUPT,
	    ("vm_page_alloc(NORMAL|SYSTEM) in interrupt context"));

	if ((req & VM_ALLOC_NOOBJ) == 0) {
		KASSERT(object != NULL,
		    ("vm_page_alloc: NULL object."));
		VM_OBJECT_LOCK_ASSERT(object, MA_OWNED);
	}

	/*
	 * The pager is allowed to eat deeper into the free page list.
	 */
	if ((curproc == pageproc) && (page_req != VM_ALLOC_INTERRUPT)) {
		page_req = VM_ALLOC_SYSTEM;
	}

	mtx_lock(&vm_page_queue_free_mtx);
	if (cnt.v_free_count + cnt.v_cache_count > cnt.v_free_reserved ||
	    (page_req == VM_ALLOC_SYSTEM &&
	    cnt.v_free_count + cnt.v_cache_count > cnt.v_interrupt_free_min) ||
	    (page_req == VM_ALLOC_INTERRUPT &&
	    cnt.v_free_count + cnt.v_cache_count > 0)) {
		/*
		 * Allocate from the free queue if the number of free pages
		 * exceeds the minimum for the request class.
		 */
		if (object != NULL &&
		    (m = vm_page_cache_lookup(object, pindex)) != NULL) {
			if ((req & VM_ALLOC_IFNOTCACHED) != 0) {
				mtx_unlock(&vm_page_queue_free_mtx);
				return (NULL);
			}
			if (vm_phys_unfree_page(m))
				vm_phys_set_pool(VM_FREEPOOL_DEFAULT, m, 0);
#if VM_NRESERVLEVEL > 0
			else if (!vm_reserv_reactivate_page(m))
#else
			else
#endif
				panic("vm_page_alloc: cache page %p is missing"
				    " from the free queue", m);
		} else if ((req & VM_ALLOC_IFCACHED) != 0) {
			mtx_unlock(&vm_page_queue_free_mtx);
			return (NULL);
#if VM_NRESERVLEVEL > 0
		} else if (object == NULL || object->type == OBJT_DEVICE ||
		    object->type == OBJT_SG ||
		    (object->flags & OBJ_COLORED) == 0 ||
		    (m = vm_reserv_alloc_page(object, pindex)) == NULL) {
#else
		} else {
#endif
			m = vm_phys_alloc_pages(object != NULL ?
			    VM_FREEPOOL_DEFAULT : VM_FREEPOOL_DIRECT, 0);
#if VM_NRESERVLEVEL > 0
			if (m == NULL && vm_reserv_reclaim_inactive()) {
				m = vm_phys_alloc_pages(object != NULL ?
				    VM_FREEPOOL_DEFAULT : VM_FREEPOOL_DIRECT,
				    0);
			}
#endif
		}
	} else {
		/*
		 * Not allocatable, give up.
		 */
		mtx_unlock(&vm_page_queue_free_mtx);
		atomic_add_int(&vm_pageout_deficit, 1);
		pagedaemon_wakeup();
		return (NULL);
	}

	/*
	 *  At this point we had better have found a good page.
	 */

	KASSERT(m != NULL, ("vm_page_alloc: missing page"));
	KASSERT(m->queue == PQ_NONE,
	    ("vm_page_alloc: page %p has unexpected queue %d", m, m->queue));
	KASSERT(m->wire_count == 0, ("vm_page_alloc: page %p is wired", m));
	KASSERT(m->hold_count == 0, ("vm_page_alloc: page %p is held", m));
	KASSERT(m->busy == 0, ("vm_page_alloc: page %p is busy", m));
	KASSERT(m->dirty == 0, ("vm_page_alloc: page %p is dirty", m));
	KASSERT(pmap_page_get_memattr(m) == VM_MEMATTR_DEFAULT,
	    ("vm_page_alloc: page %p has unexpected memattr %d", m,
	    pmap_page_get_memattr(m)));
	if ((m->flags & PG_CACHED) != 0) {
		KASSERT(m->valid != 0,
		    ("vm_page_alloc: cached page %p is invalid", m));
		if (m->object == object && m->pindex == pindex)
			cnt.v_reactivated++;
		else
			m->valid = 0;
		m_object = m->object;
		vm_page_cache_remove(m);
		if (m_object->type == OBJT_VNODE && m_object->cache == NULL)
			vp = m_object->handle;
	} else {
		KASSERT(VM_PAGE_IS_FREE(m),
		    ("vm_page_alloc: page %p is not free", m));
		KASSERT(m->valid == 0,
		    ("vm_page_alloc: free page %p is valid", m));
		cnt.v_free_count--;
	}

	/*
	 * Initialize structure.  Only the PG_ZERO flag is inherited.
	 */
	flags = 0;
	if (m->flags & PG_ZERO) {
		vm_page_zero_count--;
		if (req & VM_ALLOC_ZERO)
			flags = PG_ZERO;
	}
	if (object == NULL || object->type == OBJT_PHYS)
		flags |= PG_UNMANAGED;
	m->flags = flags;
	if (req & (VM_ALLOC_NOBUSY | VM_ALLOC_NOOBJ))
		m->oflags = 0;
	else
		m->oflags = VPO_BUSY;
	if (req & VM_ALLOC_WIRED) {
		atomic_add_int(&cnt.v_wire_count, 1);
		m->wire_count = 1;
	}
	m->act_count = 0;
	mtx_unlock(&vm_page_queue_free_mtx);

	if (object != NULL) {
		/* Ignore device objects; the pager sets "memattr" for them. */
		if (object->memattr != VM_MEMATTR_DEFAULT &&
		    object->type != OBJT_DEVICE && object->type != OBJT_SG)
			pmap_page_set_memattr(m, object->memattr);
		vm_page_insert(m, object, pindex);
	} else
		m->pindex = pindex;

	/*
	 * The following call to vdrop() must come after the above call
	 * to vm_page_insert() in case both affect the same object and
	 * vnode.  Otherwise, the affected vnode's hold count could
	 * temporarily become zero.
	 */
	if (vp != NULL)
		vdrop(vp);

	/*
	 * Don't wakeup too often - wakeup the pageout daemon when
	 * we would be nearly out of memory.
	 */
	if (vm_paging_needed())
		pagedaemon_wakeup();

	return (m);
}
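
/*
 *	Illustrative usage sketch (hypothetical; not from the original
 *	file): allocating a page for an object and sleeping via VM_WAIT
 *	when the free page queues are depleted.  Unless VM_ALLOC_NOBUSY
 *	is requested, the returned page is VPO_BUSY and must eventually
 *	be released with vm_page_wakeup().
 *
 *		VM_OBJECT_LOCK(object);
 *		while ((m = vm_page_alloc(object, pindex,
 *		    VM_ALLOC_NORMAL)) == NULL) {
 *			VM_OBJECT_UNLOCK(object);
 *			VM_WAIT;
 *			VM_OBJECT_LOCK(object);
 *		}
 *		...use the page...
 *		vm_page_wakeup(m);
 *		VM_OBJECT_UNLOCK(object);
 */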

/*
 *	vm_wait:	(also see VM_WAIT macro)
 *
 *	Block until free pages are available for allocation
 *	- Called in various places before memory allocations.
 */
void
vm_wait(void)
{

	mtx_lock(&vm_page_queue_free_mtx);
	if (curproc == pageproc) {
		vm_pageout_pages_needed = 1;
		msleep(&vm_pageout_pages_needed, &vm_page_queue_free_mtx,
		    PDROP | PSWP, "VMWait", 0);
	} else {
		if (!vm_pages_needed) {
			vm_pages_needed = 1;
			wakeup(&vm_pages_needed);
		}
		msleep(&cnt.v_free_count, &vm_page_queue_free_mtx, PDROP | PVM,
		    "vmwait", 0);
	}
}

/*
 *	vm_waitpfault:	(also see VM_WAITPFAULT macro)
 *
 *	Block until free pages are available for allocation
 *	- Called only in vm_fault so that processes page faulting
 *	  can be easily tracked.
 *	- Sleeps at a lower priority than vm_wait() so that vm_wait()ing
 *	  processes will be able to grab memory first.  Do not change
 *	  this balance without careful testing first.
 */
void
vm_waitpfault(void)
{

	mtx_lock(&vm_page_queue_free_mtx);
	if (!vm_pages_needed) {
		vm_pages_needed = 1;
		wakeup(&vm_pages_needed);
	}
	msleep(&cnt.v_free_count, &vm_page_queue_free_mtx, PDROP | PUSER,
	    "pfault", 0);
}

/*
 *	vm_page_requeue:
 *
 *	If the given page is contained within a page queue, move it to the tail
 *	of that queue.
 *
 *	The page queues must be locked.
 */
void
vm_page_requeue(vm_page_t m)
{
	int queue = VM_PAGE_GETQUEUE(m);
	struct vpgqueues *vpq;

	if (queue != PQ_NONE) {
		vpq = &vm_page_queues[queue];
		TAILQ_REMOVE(&vpq->pl, m, pageq);
		TAILQ_INSERT_TAIL(&vpq->pl, m, pageq);
	}
}

/*
 *	vm_page_queue_remove:
 *
 *	Remove the given page from the specified queue.
 *
 *	The page and page queues must be locked.
 */
static __inline void
vm_page_queue_remove(int queue, vm_page_t m)
{
	struct vpgqueues *pq;

	mtx_assert(&vm_page_queue_mtx, MA_OWNED);
	vm_page_lock_assert(m, MA_OWNED);
	pq = &vm_page_queues[queue];
	TAILQ_REMOVE(&pq->pl, m, pageq);
	(*pq->cnt)--;
}

/*
 *	vm_pageq_remove:
 *
 *	Remove a page from its queue.
 *
 *	The given page must be locked.
 *	This routine may not block.
 */
void
vm_pageq_remove(vm_page_t m)
{
	int queue = VM_PAGE_GETQUEUE(m);

	vm_page_lock_assert(m, MA_OWNED);
	if (queue != PQ_NONE) {
		vm_page_lock_queues();
		VM_PAGE_SETQUEUE2(m, PQ_NONE);
		vm_page_queue_remove(queue, m);
		vm_page_unlock_queues();
	}
}

/*
 *	vm_page_enqueue:
 *
 *	Add the given page to the specified queue.
 *
 *	The page queues must be locked.
 */
static void
vm_page_enqueue(int queue, vm_page_t m)
{
	struct vpgqueues *vpq;

	vpq = &vm_page_queues[queue];
	VM_PAGE_SETQUEUE2(m, queue);
	TAILQ_INSERT_TAIL(&vpq->pl, m, pageq);
	++*vpq->cnt;
}

/*
 *	vm_page_activate:
 *
 *	Put the specified page on the active list (if appropriate).
 *	Ensure that act_count is at least ACT_INIT but do not otherwise
 *	mess with it.
 *
 *	The page must be locked.
 *	This routine may not block.
 */
void
vm_page_activate(vm_page_t m)
{
	int queue;

	vm_page_lock_assert(m, MA_OWNED);
	if ((queue = VM_PAGE_GETKNOWNQUEUE2(m)) != PQ_ACTIVE) {
		if (m->wire_count == 0 && (m->flags & PG_UNMANAGED) == 0) {
			if (m->act_count < ACT_INIT)
				m->act_count = ACT_INIT;
			vm_page_lock_queues();
			if (queue != PQ_NONE)
				vm_page_queue_remove(queue, m);
			vm_page_enqueue(PQ_ACTIVE, m);
			vm_page_unlock_queues();
		} else
			KASSERT(queue == PQ_NONE,
			    ("vm_page_activate: wired page %p is queued", m));
	} else {
		if (m->act_count < ACT_INIT)
			m->act_count = ACT_INIT;
	}
}

/*
 *	vm_page_free_wakeup:
 *
 *	Helper routine for vm_page_free_toq() and vm_page_cache().  This
 *	routine is called when a page has been added to the cache or free
 *	queues.
 *
 *	The page queues must be locked.
 *	This routine may not block.
 */
static inline void
vm_page_free_wakeup(void)
{

	mtx_assert(&vm_page_queue_free_mtx, MA_OWNED);
	/*
	 * if pageout daemon needs pages, then tell it that there are
	 * some free.
	 */
	if (vm_pageout_pages_needed &&
	    cnt.v_cache_count + cnt.v_free_count >= cnt.v_pageout_free_min) {
		wakeup(&vm_pageout_pages_needed);
		vm_pageout_pages_needed = 0;
	}
	/*
	 * wakeup processes that are waiting on memory if we hit a
	 * high water mark. And wakeup scheduler process if we have
	 * lots of memory. this process will swapin processes.
	 */
	if (vm_pages_needed && !vm_page_count_min()) {
		vm_pages_needed = 0;
		wakeup(&cnt.v_free_count);
	}
}

/*
 *	vm_page_free_toq:
 *
 *	Returns the given page to the free list,
 *	disassociating it with any VM object.
 *
 *	Object and page must be locked prior to entry.
 *	This routine may not block.
 */

void
vm_page_free_toq(vm_page_t m)
{

	if ((m->flags & PG_UNMANAGED) == 0) {
		vm_page_lock_assert(m, MA_OWNED);
		KASSERT(!pmap_page_is_mapped(m),
		    ("vm_page_free_toq: freeing mapped page %p", m));
	}
	PCPU_INC(cnt.v_tfree);

	if (m->busy || VM_PAGE_IS_FREE(m)) {
		printf(
	"vm_page_free: pindex(%lu), busy(%d), VPO_BUSY(%d), hold(%d)\n",
		    (u_long)m->pindex, m->busy, (m->oflags & VPO_BUSY) ? 1 : 0,
		    m->hold_count);
		if (VM_PAGE_IS_FREE(m))
			panic("vm_page_free: freeing free page");
		else
			panic("vm_page_free: freeing busy page");
	}

	/*
	 * unqueue, then remove page.  Note that we cannot destroy
	 * the page here because we do not want to call the pager's
	 * callback routine until after we've put the page on the
	 * appropriate free queue.
	 */
	if ((m->flags & PG_UNMANAGED) == 0)
		vm_pageq_remove(m);
	vm_page_remove(m);

	/*
	 * If the page is fictitious, we are done; fictitious pages are
	 * never placed in the free page queues.
	 */
	if ((m->flags & PG_FICTITIOUS) != 0)
		return;

	m->valid = 0;
	vm_page_undirty(m);

	if (m->wire_count != 0) {
		if (m->wire_count > 1) {
			panic("vm_page_free: invalid wire count (%d), pindex: 0x%lx",
				m->wire_count, (long)m->pindex);
		}
		panic("vm_page_free: freeing wired page");
	}
	if (m->hold_count != 0) {
		m->flags &= ~PG_ZERO;
		vm_page_lock_queues();
		vm_page_enqueue(PQ_HOLD, m);
		vm_page_unlock_queues();
	} else {
		/*
		 * Restore the default memory attribute to the page.
		 */
		if (pmap_page_get_memattr(m) != VM_MEMATTR_DEFAULT)
			pmap_page_set_memattr(m, VM_MEMATTR_DEFAULT);

		/*
		 * Insert the page into the physical memory allocator's
		 * cache/free page queues.
		 */
		mtx_lock(&vm_page_queue_free_mtx);
		m->flags |= PG_FREE;
		cnt.v_free_count++;
#if VM_NRESERVLEVEL > 0
		if (!vm_reserv_free_page(m))
#else
		if (TRUE)
#endif
			vm_phys_free_pages(m, 0);
		if ((m->flags & PG_ZERO) != 0)
			++vm_page_zero_count;
		else
			vm_page_zero_idle_wakeup();
		vm_page_free_wakeup();
		mtx_unlock(&vm_page_queue_free_mtx);
	}
}

/*
 *	vm_page_wire:
 *
 *	Mark this page as wired down by yet
 *	another map, removing it from paging queues
 *	as necessary.
 *
 *	The page must be locked.
 *	This routine may not block.
 */
void
vm_page_wire(vm_page_t m)
{

	/*
	 * Only bump the wire statistics if the page is not already wired,
	 * and only unqueue the page if it is on some queue (if it is unmanaged
	 * it is already off the queues).
	 */
	vm_page_lock_assert(m, MA_OWNED);
	if (m->flags & PG_FICTITIOUS)
		return;
	if (m->wire_count == 0) {
		if ((m->flags & PG_UNMANAGED) == 0)
			vm_pageq_remove(m);
		atomic_add_int(&cnt.v_wire_count, 1);
	}
	m->wire_count++;
	KASSERT(m->wire_count != 0, ("vm_page_wire: wire_count overflow m=%p", m));
}

/*
 *	vm_page_unwire:
 *
 *	Release one wiring of this page, potentially
 *	enabling it to be paged again.
 *
 *	Many pages placed on the inactive queue should actually go
 *	into the cache, but it is difficult to figure out which.  What
 *	we do instead, if the inactive target is well met, is to put
 *	clean pages at the head of the inactive queue instead of the tail.
 *	This will cause them to be moved to the cache more quickly and
 *	if not actively re-referenced, freed more quickly.  If we just
 *	stick these pages at the end of the inactive queue, heavy filesystem
 *	meta-data accesses can cause an unnecessary paging load on memory bound
 *	processes.  This optimization causes one-time-use metadata to be
 *	reused more quickly.
 *
 *	BUT, if we are in a low-memory situation we have no choice but to
 *	put clean pages on the cache queue.
 *
 *	A number of routines use vm_page_unwire() to guarantee that the page
 *	will go into either the inactive or active queues, and will NEVER
 *	be placed in the cache - for example, just after dirtying a page.
 *	dirty pages in the cache are not allowed.
 *
 *	The page must be locked.
 *	This routine may not block.
 */
void
vm_page_unwire(vm_page_t m, int activate)
{

	if ((m->flags & PG_UNMANAGED) == 0)
		vm_page_lock_assert(m, MA_OWNED);
	if (m->flags & PG_FICTITIOUS)
		return;
	if (m->wire_count > 0) {
		m->wire_count--;
		if (m->wire_count == 0) {
			atomic_subtract_int(&cnt.v_wire_count, 1);
			if ((m->flags & PG_UNMANAGED) != 0)
				return;
			vm_page_lock_queues();
			if (activate)
				vm_page_enqueue(PQ_ACTIVE, m);
			else {
				vm_page_flag_clear(m, PG_WINATCFLS);
				vm_page_enqueue(PQ_INACTIVE, m);
			}
			vm_page_unlock_queues();
		}
	} else {
		panic("vm_page_unwire: invalid wire count: %d", m->wire_count);
	}
}

/*
 * Move the specified page to the inactive queue.
 *
 * Normally athead is 0 resulting in LRU operation.  athead is set
 * to 1 if we want this page to be 'as if it were placed in the cache',
 * except without unmapping it from the process address space.
 *
 * This routine may not block.
 */
static inline void
_vm_page_deactivate(vm_page_t m, int athead)
{
	int queue;

	vm_page_lock_assert(m, MA_OWNED);

	/*
	 * Ignore if already inactive.
	 */
	if ((queue = VM_PAGE_GETKNOWNQUEUE2(m)) == PQ_INACTIVE)
		return;
	if (m->wire_count == 0 && (m->flags & PG_UNMANAGED) == 0) {
		vm_page_lock_queues();
		vm_page_flag_clear(m, PG_WINATCFLS);
		if (queue != PQ_NONE)
			vm_page_queue_remove(queue, m);
		if (athead)
			TAILQ_INSERT_HEAD(&vm_page_queues[PQ_INACTIVE].pl, m,
			    pageq);
		else
			TAILQ_INSERT_TAIL(&vm_page_queues[PQ_INACTIVE].pl, m,
			    pageq);
		VM_PAGE_SETQUEUE2(m, PQ_INACTIVE);
		cnt.v_inactive_count++;
		vm_page_unlock_queues();
	}
}

/*
 * Move the specified page to the inactive queue.
 *
 * The page must be locked.
 */
void
vm_page_deactivate(vm_page_t m)
{

	_vm_page_deactivate(m, 0);
}

/*
 * vm_page_try_to_cache:
 *
 * Returns 0 on failure, 1 on success
 */
int
vm_page_try_to_cache(vm_page_t m)
{

	vm_page_lock_assert(m, MA_OWNED);
	VM_OBJECT_LOCK_ASSERT(m->object, MA_OWNED);
	if (m->dirty || m->hold_count || m->busy || m->wire_count ||
	    (m->oflags & VPO_BUSY) || (m->flags & PG_UNMANAGED))
		return (0);
	pmap_remove_all(m);
	if (m->dirty)
		return (0);
	vm_page_cache(m);
	return (1);
}

/*
 * vm_page_try_to_free()
 *
 *	Attempt to free the page.  If we cannot free it, we do nothing.
 *	1 is returned on success, 0 on failure.
 */
int
vm_page_try_to_free(vm_page_t m)
{

	vm_page_lock_assert(m, MA_OWNED);
	if (m->object != NULL)
		VM_OBJECT_LOCK_ASSERT(m->object, MA_OWNED);
	if (m->dirty || m->hold_count || m->busy || m->wire_count ||
	    (m->oflags & VPO_BUSY) || (m->flags & PG_UNMANAGED))
		return (0);
	pmap_remove_all(m);
	if (m->dirty)
		return (0);
	vm_page_free(m);
	return (1);
}

/*
 * vm_page_cache
 *
 * Put the specified page onto the page cache queue (if appropriate).
 *
 * This routine may not block.
 */
void
vm_page_cache(vm_page_t m)
{
	vm_object_t object;
	vm_page_t root;

	vm_page_lock_assert(m, MA_OWNED);
	object = m->object;
	VM_OBJECT_LOCK_ASSERT(object, MA_OWNED);
	if ((m->flags & PG_UNMANAGED) || (m->oflags & VPO_BUSY) || m->busy ||
	    m->hold_count || m->wire_count)
		panic("vm_page_cache: attempting to cache busy page");
	pmap_remove_all(m);
	if (m->dirty != 0)
		panic("vm_page_cache: page %p is dirty", m);
	if (m->valid == 0 || object->type == OBJT_DEFAULT ||
	    (object->type == OBJT_SWAP &&
	    !vm_pager_has_page(object, m->pindex, NULL, NULL))) {
		/*
		 * Hypothesis: A cache-eligible page belonging to a
		 * default object or swap object but without a backing
		 * store must be zero filled.
		 */
		vm_page_free(m);
		return;
	}
	KASSERT((m->flags & PG_CACHED) == 0,
	    ("vm_page_cache: page %p is already cached", m));
	PCPU_INC(cnt.v_tcached);

	/*
	 * Remove the page from the paging queues.
	 */
	vm_pageq_remove(m);

	/*
	 * Remove the page from the object's collection of resident
	 * pages.
	 */
	if (m != object->root)
		vm_page_splay(m->pindex, object->root);
	if (m->left == NULL)
		root = m->right;
	else {
		root = vm_page_splay(m->pindex, m->left);
		root->right = m->right;
	}
	object->root = root;
	TAILQ_REMOVE(&object->memq, m, listq);
	object->resident_page_count--;
	object->generation++;

	/*
	 * Restore the default memory attribute to the page.
	 */
	if (pmap_page_get_memattr(m) != VM_MEMATTR_DEFAULT)
		pmap_page_set_memattr(m, VM_MEMATTR_DEFAULT);

	/*
	 * Insert the page into the object's collection of cached pages
	 * and the physical memory allocator's cache/free page queues.
	 */
	m->flags &= ~PG_ZERO;
	mtx_lock(&vm_page_queue_free_mtx);
	m->flags |= PG_CACHED;
	cnt.v_cache_count++;
	root = object->cache;
	if (root == NULL) {
		m->left = NULL;
		m->right = NULL;
	} else {
		root = vm_page_splay(m->pindex, root);
		if (m->pindex < root->pindex) {
			m->left = root->left;
			m->right = root;
			root->left = NULL;
		} else if (__predict_false(m->pindex == root->pindex))
			panic("vm_page_cache: offset already cached");
		else {
			m->right = root->right;
			m->left = root;
			root->right = NULL;
		}
	}
	object->cache = m;
#if VM_NRESERVLEVEL > 0
	if (!vm_reserv_free_page(m)) {
#else
	if (TRUE) {
#endif
		vm_phys_set_pool(VM_FREEPOOL_CACHE, m, 0);
		vm_phys_free_pages(m, 0);
	}
	vm_page_free_wakeup();
	mtx_unlock(&vm_page_queue_free_mtx);

	/*
	 * Increment the vnode's hold count if this is the object's only
	 * cached page.  Decrement the vnode's hold count if this was
	 * the object's only resident page.
	 */
	if (object->type == OBJT_VNODE) {
		if (root == NULL && object->resident_page_count != 0)
			vhold(object->handle);
		else if (root != NULL && object->resident_page_count == 0)
			vdrop(object->handle);
	}
}

/*
 * vm_page_dontneed
 *
 *	Cache, deactivate, or do nothing as appropriate.  This routine
 *	is typically used by madvise() MADV_DONTNEED.
 *
 *	Generally speaking we want to move the page into the cache so
 *	it gets reused quickly.  However, this can result in a silly syndrome
 *	due to the page recycling too quickly.  Small objects will not be
 *	fully cached.  On the other hand, if we move the page to the inactive
 *	queue we wind up with a problem whereby very large objects
 *	unnecessarily blow away our inactive and cache queues.
 *
 *	The solution is to move the pages based on a fixed weighting.  We
 *	either leave them alone, deactivate them, or move them to the cache,
 *	where moving them to the cache has the highest weighting.
 *	By forcing some pages into other queues we eventually force the
 *	system to balance the queues, potentially recovering other unrelated
 *	space from active.  The idea is to not force this to happen too
 *	often.
 */
void
vm_page_dontneed(vm_page_t m)
{
	int dnw;
	int head;

	vm_page_lock_assert(m, MA_OWNED);
	VM_OBJECT_LOCK_ASSERT(m->object, MA_OWNED);
	dnw = PCPU_GET(dnweight);
	PCPU_INC(dnweight);

	/*
	 * Occasionally leave the page alone.
	 */
	if ((dnw & 0x01F0) == 0 ||
	    VM_PAGE_INQUEUE2(m, PQ_INACTIVE)) {
		if (m->act_count >= ACT_INIT)
			--m->act_count;
		return;
	}

	/*
	 * Clear any references to the page.  Otherwise, the page daemon will
	 * immediately reactivate the page.
	 *
	 * Perform the pmap_clear_reference() first.  Otherwise, a concurrent
	 * pmap operation, such as pmap_remove(), could clear a reference in
	 * the pmap and set PG_REFERENCED on the page before the
	 * pmap_clear_reference() had completed.  Consequently, the page would
	 * appear referenced based upon an old reference that occurred before
	 * this function ran.
	 */
	pmap_clear_reference(m);
	vm_page_lock_queues();
	vm_page_flag_clear(m, PG_REFERENCED);
	vm_page_unlock_queues();

	if (m->dirty == 0 && pmap_is_modified(m))
		vm_page_dirty(m);

1934	if (m->dirty || (dnw & 0x0070) == 0) {
1935		/*
1936		 * Deactivate the page 3 times out of 32.
1937		 */
1938		head = 0;
1939	} else {
1940		/*
1941		 * Cache the page 28 times out of every 32.  In fact, the page
1942		 * is deactivated rather than cached, but placed at the head of
1943		 * the inactive queue rather than the tail, so it is reclaimed soon.
1944		 */
1945		head = 1;
1946	}
1947	_vm_page_deactivate(m, head);
1948}
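/*
 * Illustrative breakdown of the weighting that the masks above realize
 * (derived from the code; the early return for a page that is already
 * inactive and the forced head = 0 for dirty pages are set aside here):
 * over the 32 values taken by bits 4-8 of dnweight,
 *
 *	(dnw & 0x01F0) == 0	 1/32	leave the page alone
 *	(dnw & 0x0070) == 0	 3/32	deactivate at the queue tail
 *	otherwise		28/32	deactivate at the queue head
 *
 * (dnw & 0x0070) == 0 holds for 4 of the 32 values, but one of those also
 * satisfies (dnw & 0x01F0) == 0 and is consumed by the early return,
 * yielding the 3/32 and 28/32 figures quoted above.
 */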
1949
1950/*
1951 * Grab a page, waiting until we are woken up due to the page
1952 * changing state.  We keep on waiting if the page continues
1953 * to be in the object.  If the page doesn't exist, first allocate it
1954 * and then conditionally zero it.
1955 *
1956 * This routine may block.
1957 */
1958vm_page_t
1959vm_page_grab(vm_object_t object, vm_pindex_t pindex, int allocflags)
1960{
1961	vm_page_t m;
1962
1963	VM_OBJECT_LOCK_ASSERT(object, MA_OWNED);
1964retrylookup:
1965	if ((m = vm_page_lookup(object, pindex)) != NULL) {
1966		if ((m->oflags & VPO_BUSY) != 0 || m->busy != 0) {
1967			if ((allocflags & VM_ALLOC_RETRY) != 0) {
1968				/*
1969				 * Reference the page before unlocking and
1970				 * sleeping so that the page daemon is less
1971				 * likely to reclaim it.
1972				 */
1973				vm_page_lock_queues();
1974				vm_page_flag_set(m, PG_REFERENCED);
1975			}
1976			vm_page_sleep(m, "pgrbwt");
1977			if ((allocflags & VM_ALLOC_RETRY) == 0)
1978				return (NULL);
1979			goto retrylookup;
1980		} else {
1981			if ((allocflags & VM_ALLOC_WIRED) != 0) {
1982				vm_page_lock(m);
1983				vm_page_wire(m);
1984				vm_page_unlock(m);
1985			}
1986			if ((allocflags & VM_ALLOC_NOBUSY) == 0)
1987				vm_page_busy(m);
1988			return (m);
1989		}
1990	}
1991	m = vm_page_alloc(object, pindex, allocflags & ~VM_ALLOC_RETRY);
1992	if (m == NULL) {
1993		VM_OBJECT_UNLOCK(object);
1994		VM_WAIT;
1995		VM_OBJECT_LOCK(object);
1996		if ((allocflags & VM_ALLOC_RETRY) == 0)
1997			return (NULL);
1998		goto retrylookup;
1999	} else if (m->valid != 0)
2000		return (m);
2001	if ((allocflags & VM_ALLOC_ZERO) != 0 && (m->flags & PG_ZERO) == 0)
2002		pmap_zero_page(m);
2003	return (m);
2004}
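/*
 * Typical use, sketched here for illustration only (the flag combination
 * and the vm_page_wakeup() pairing are assumed, not taken from a specific
 * caller):
 *
 *	VM_OBJECT_LOCK(object);
 *	m = vm_page_grab(object, pindex,
 *	    VM_ALLOC_NORMAL | VM_ALLOC_RETRY | VM_ALLOC_ZERO);
 *	... use the page; it is returned busied because VM_ALLOC_NOBUSY
 *	was not passed, and it is never NULL because VM_ALLOC_RETRY
 *	retries until a page is found or allocated ...
 *	vm_page_wakeup(m);
 *	VM_OBJECT_UNLOCK(object);
 */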
2005
2006/*
2007 * Mapping function for valid bits or for dirty bits in a page:
2008 * computes the bitmask of DEV_BSIZE'd chunks covered by the byte
2009 * range [base, base + size).  May not block.  Inputs are required
2010 * to range within a page.
2011 */
2012int
2013vm_page_bits(int base, int size)
2014{
2015	int first_bit;
2016	int last_bit;
2017
2018	KASSERT(
2019	    base + size <= PAGE_SIZE,
2020	    ("vm_page_bits: illegal base/size %d/%d", base, size)
2021	);
2022
2023	if (size == 0)		/* handle degenerate case */
2024		return (0);
2025
2026	first_bit = base >> DEV_BSHIFT;
2027	last_bit = (base + size - 1) >> DEV_BSHIFT;
2028
2029	return ((2 << last_bit) - (1 << first_bit));
2030}
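/*
 * Worked example (illustrative only): assuming DEV_BSIZE is 512 and thus
 * DEV_BSHIFT is 9, vm_page_bits(512, 1024) covers the byte range
 * [512, 1536), i.e., blocks 1 and 2 of the page:
 *
 *	first_bit = 512 >> 9              = 1
 *	last_bit  = (512 + 1024 - 1) >> 9 = 2
 *	result    = (2 << 2) - (1 << 1)   = 8 - 2 = 0x6
 *
 * which is exactly the mask with bits 1 and 2 set.
 */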
2031
2032/*
2033 *	vm_page_set_valid:
2034 *
2035 *	Sets portions of a page valid.  The arguments are expected to
2036 *	be DEV_BSIZE aligned, but if they aren't, the bitmap is inclusive
2037 *	of any partial chunks touched by the range.  The invalid portion
2038 *	of such chunks will be zeroed.
2039 *
2040 *	(base + size) must be less than or equal to PAGE_SIZE.
2041 */
2042void
2043vm_page_set_valid(vm_page_t m, int base, int size)
2044{
2045	int endoff, frag;
2046
2047	VM_OBJECT_LOCK_ASSERT(m->object, MA_OWNED);
2048	if (size == 0)	/* handle degenerate case */
2049		return;
2050
2051	/*
2052	 * If the base is not DEV_BSIZE aligned and the valid
2053	 * bit is clear, we have to zero out a portion of the
2054	 * first block.
2055	 */
2056	if ((frag = base & ~(DEV_BSIZE - 1)) != base &&
2057	    (m->valid & (1 << (base >> DEV_BSHIFT))) == 0)
2058		pmap_zero_page_area(m, frag, base - frag);
2059
2060	/*
2061	 * If the ending offset is not DEV_BSIZE aligned and the
2062	 * valid bit is clear, we have to zero out a portion of
2063	 * the last block.
2064	 */
2065	endoff = base + size;
2066	if ((frag = endoff & ~(DEV_BSIZE - 1)) != endoff &&
2067	    (m->valid & (1 << (endoff >> DEV_BSHIFT))) == 0)
2068		pmap_zero_page_area(m, endoff,
2069		    DEV_BSIZE - (endoff & (DEV_BSIZE - 1)));
2070
2071	/*
2072	 * Assert that no previously invalid block that is now being validated
2073	 * is already dirty.
2074	 */
2075	KASSERT((~m->valid & vm_page_bits(base, size) & m->dirty) == 0,
2076	    ("vm_page_set_valid: page %p is dirty", m));
2077
2078	/*
2079	 * Set valid bits inclusive of any overlap.
2080	 */
2081	m->valid |= vm_page_bits(base, size);
2082}
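/*
 * Worked example (illustrative only; again assuming DEV_BSIZE is 512):
 * vm_page_set_valid(m, 100, 200) touches only block 0 of the page.  If
 * bit 0 of m->valid is clear, bytes [0, 100) are zeroed for the
 * unaligned base and bytes [300, 512) for the unaligned end, after
 * which vm_page_bits(100, 200) == 0x1 marks block 0 valid.  Zeroing
 * the invalid fringes keeps the whole block well-defined once its
 * valid bit is set.
 */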
2083
2084/*
2085 * Clear the given bits from the specified page's dirty field.
2086 */
2087static __inline void
2088vm_page_clear_dirty_mask(vm_page_t m, int pagebits)
2089{
2090
2091	/*
2092	 * If the object is locked and the page is neither VPO_BUSY nor
2093	 * PG_WRITEABLE, then the page's dirty field cannot possibly be
2094	 * modified by a concurrent pmap operation.
2095	 */
2096	VM_OBJECT_LOCK_ASSERT(m->object, MA_OWNED);
2097	if ((m->oflags & VPO_BUSY) == 0 && (m->flags & PG_WRITEABLE) == 0)
2098		m->dirty &= ~pagebits;
2099	else {
2100		vm_page_lock_queues();
2101		m->dirty &= ~pagebits;
2102		vm_page_unlock_queues();
2103	}
2104}
2105
2106/*
2107 *	vm_page_set_validclean:
2108 *
2109 *	Sets portions of a page valid and clean.  The arguments are
2110 *	expected to be DEV_BSIZE aligned, but if they aren't, the bitmap
2111 *	is inclusive of any partial chunks touched by the range.  The
2112 *	invalid portion of such chunks will be zeroed.
2113 *
2114 *	This routine may not block.
2115 *
2116 *	(base + size) must be less than or equal to PAGE_SIZE.
2117 */
2118void
2119vm_page_set_validclean(vm_page_t m, int base, int size)
2120{
2121	u_long oldvalid;
2122	int endoff, frag, pagebits;
2123
2124	VM_OBJECT_LOCK_ASSERT(m->object, MA_OWNED);
2125	if (size == 0)	/* handle degenerate case */
2126		return;
2127
2128	/*
2129	 * If the base is not DEV_BSIZE aligned and the valid
2130	 * bit is clear, we have to zero out a portion of the
2131	 * first block.
2132	 */
2133	if ((frag = base & ~(DEV_BSIZE - 1)) != base &&
2134	    (m->valid & (1 << (base >> DEV_BSHIFT))) == 0)
2135		pmap_zero_page_area(m, frag, base - frag);
2136
2137	/*
2138	 * If the ending offset is not DEV_BSIZE aligned and the
2139	 * valid bit is clear, we have to zero out a portion of
2140	 * the last block.
2141	 */
2142	endoff = base + size;
2143	if ((frag = endoff & ~(DEV_BSIZE - 1)) != endoff &&
2144	    (m->valid & (1 << (endoff >> DEV_BSHIFT))) == 0)
2145		pmap_zero_page_area(m, endoff,
2146		    DEV_BSIZE - (endoff & (DEV_BSIZE - 1)));
2147
2148	/*
2149	 * Set valid, clear dirty bits.  If validating the entire
2150	 * page we can safely clear the pmap modify bit.  We also
2151	 * use this opportunity to clear the VPO_NOSYNC flag.  If a process
2152	 * takes a write fault on a MAP_NOSYNC memory area, the flag will
2153	 * be set again.
2154	 *
2155	 * We set valid bits inclusive of any overlap, but we can only
2156	 * clear dirty bits for DEV_BSIZE chunks that are fully within
2157	 * the range.
2158	 */
2159	oldvalid = m->valid;
2160	pagebits = vm_page_bits(base, size);
2161	m->valid |= pagebits;
2162#if 0	/* NOT YET */
2163	if ((frag = base & (DEV_BSIZE - 1)) != 0) {
2164		frag = DEV_BSIZE - frag;
2165		base += frag;
2166		size -= frag;
2167		if (size < 0)
2168			size = 0;
2169	}
2170	pagebits = vm_page_bits(base, size & ~(DEV_BSIZE - 1));
2171#endif
2172	if (base == 0 && size == PAGE_SIZE) {
2173		/*
2174		 * The page can only be modified within the pmap if it is
2175		 * mapped, and it can only be mapped if it was previously
2176		 * fully valid.
2177		 */
2178		if (oldvalid == VM_PAGE_BITS_ALL)
2179			/*
2180			 * Perform the pmap_clear_modify() first.  Otherwise,
2181			 * a concurrent pmap operation, such as
2182			 * pmap_protect(), could clear a modification in the
2183			 * pmap and set the dirty field on the page before
2184			 * pmap_clear_modify() had begun and after the dirty
2185			 * field was cleared here.
2186			 */
2187			pmap_clear_modify(m);
2188		m->dirty = 0;
2189		m->oflags &= ~VPO_NOSYNC;
2190	} else if (oldvalid != VM_PAGE_BITS_ALL)
2191		m->dirty &= ~pagebits;
2192	else
2193		vm_page_clear_dirty_mask(m, pagebits);
2194}
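/*
 * To summarize the cases above: vm_page_set_validclean(m, 0, PAGE_SIZE)
 * validates and cleans the entire page, clears VPO_NOSYNC, and, when the
 * page was previously fully valid (and so could have been mapped), first
 * clears the pmap modify bit; a partial call merely sets and clears the
 * bits that vm_page_bits() computes for the given range.
 */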
2195
2196void
2197vm_page_clear_dirty(vm_page_t m, int base, int size)
2198{
2199
2200	vm_page_clear_dirty_mask(m, vm_page_bits(base, size));
2201}
2202
2203/*
2204 *	vm_page_set_invalid:
2205 *
2206 *	Invalidates DEV_BSIZE'd chunks within a page.  Both the
2207 *	valid and dirty bits for the affected areas are cleared.
2208 *
2209 *	May not block.
2210 */
2211void
2212vm_page_set_invalid(vm_page_t m, int base, int size)
2213{
2214	int bits;
2215
2216	VM_OBJECT_LOCK_ASSERT(m->object, MA_OWNED);
2217	KASSERT((m->oflags & VPO_BUSY) == 0,
2218	    ("vm_page_set_invalid: page %p is busy", m));
2219	bits = vm_page_bits(base, size);
2220	if (m->valid == VM_PAGE_BITS_ALL && bits != 0)
2221		pmap_remove_all(m);
2222	KASSERT(!pmap_page_is_mapped(m),
2223	    ("vm_page_set_invalid: page %p is mapped", m));
2224	m->valid &= ~bits;
2225	m->dirty &= ~bits;
2226	m->object->generation++;
2227}
2228
2229/*
2230 * vm_page_zero_invalid()
2231 *
2232 *	The kernel assumes that the invalid portions of a page contain
2233 *	garbage, but such pages can be mapped into memory by user code.
2234 *	When this occurs, we must zero out the non-valid portions of the
2235 *	page so user code sees what it expects.
2236 *
2237 *	Pages are most often semi-valid when the end of a file is mapped
2238 *	into memory and the file's size is not page aligned.
2239 */
2240void
2241vm_page_zero_invalid(vm_page_t m, boolean_t setvalid)
2242{
2243	int b;
2244	int i;
2245
2246	VM_OBJECT_LOCK_ASSERT(m->object, MA_OWNED);
2247	/*
2248	 * Scan the valid bits looking for invalid sections that
2249	 * must be zeroed.  Invalid sub-DEV_BSIZE'd areas (where the
2250	 * valid bit may be set) have already been zeroed by
2251	 * vm_page_set_validclean().
2252	 */
2253	for (b = i = 0; i <= PAGE_SIZE / DEV_BSIZE; ++i) {
2254		if (i == (PAGE_SIZE / DEV_BSIZE) ||
2255		    (m->valid & (1 << i))
2256		) {
2257			if (i > b) {
2258				pmap_zero_page_area(m,
2259				    b << DEV_BSHIFT, (i - b) << DEV_BSHIFT);
2260			}
2261			b = i + 1;
2262		}
2263	}
2264
2265	/*
2266	 * setvalid is TRUE when we can safely set the zeroed areas
2267	 * as being valid.  We can do this if there are no cache consistency
2268	 * issues, e.g., it is OK to do for UFS, but not OK to do for NFS.
2269	 */
2270	if (setvalid)
2271		m->valid = VM_PAGE_BITS_ALL;
2272}
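/*
 * Worked example of the scan (illustrative only; a 4K page with
 * DEV_BSIZE 512 has eight blocks): with m->valid == 0x0f, blocks 0-3
 * are valid, the loop accumulates the invalid run [4, 8), and the
 * terminating iteration i == PAGE_SIZE / DEV_BSIZE issues
 * pmap_zero_page_area(m, 4 << DEV_BSHIFT, 4 << DEV_BSHIFT).
 */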
2273
2274/*
2275 *	vm_page_is_valid:
2276 *
2277 *	Is (partial) page valid?  In the degenerate case where size == 0,
2278 *	this returns FALSE if the page is entirely invalid and TRUE
2279 *	otherwise.
2280 *
2281 *	May not block.
2282 */
2283int
2284vm_page_is_valid(vm_page_t m, int base, int size)
2285{
2286	int bits = vm_page_bits(base, size);
2287
2288	VM_OBJECT_LOCK_ASSERT(m->object, MA_OWNED);
2289	if (m->valid != 0 && (m->valid & bits) == bits)
2290		return (1);
2291	else
2292		return (0);
2293}
2294
2295/*
2296 * update dirty bits from pmap/mmu.  May not block.
2297 */
2298void
2299vm_page_test_dirty(vm_page_t m)
2300{
2301
2302	VM_OBJECT_LOCK_ASSERT(m->object, MA_OWNED);
2303	if (m->dirty != VM_PAGE_BITS_ALL && pmap_is_modified(m))
2304		vm_page_dirty(m);
2305}
2306
2307int so_zerocp_fullpage = 0;
2308
2309/*
2310 *	Replace the given page with a copy.  The copied page assumes
2311 *	the portion of the given page's "wire_count" that is not the
2312 *	responsibility of this copy-on-write mechanism.
2313 *
2314 *	The object containing the given page must have a non-zero
2315 *	paging-in-progress count and be locked.
2316 */
2317void
2318vm_page_cowfault(vm_page_t m)
2319{
2320	vm_page_t mnew;
2321	vm_object_t object;
2322	vm_pindex_t pindex;
2323
2324	mtx_assert(&vm_page_queue_mtx, MA_NOTOWNED);
2325	vm_page_lock_assert(m, MA_OWNED);
2326	object = m->object;
2327	VM_OBJECT_LOCK_ASSERT(object, MA_OWNED);
2328	KASSERT(object->paging_in_progress != 0,
2329	    ("vm_page_cowfault: object %p's paging-in-progress count is zero.",
2330	    object));
2331	pindex = m->pindex;
2332
2333 retry_alloc:
2334	pmap_remove_all(m);
2335	vm_page_remove(m);
2336	mnew = vm_page_alloc(object, pindex, VM_ALLOC_NORMAL | VM_ALLOC_NOBUSY);
2337	if (mnew == NULL) {
2338		vm_page_insert(m, object, pindex);
2339		vm_page_unlock(m);
2340		VM_OBJECT_UNLOCK(object);
2341		VM_WAIT;
2342		VM_OBJECT_LOCK(object);
2343		if (m == vm_page_lookup(object, pindex)) {
2344			vm_page_lock(m);
2345			goto retry_alloc;
2346		} else {
2347			/*
2348			 * Page disappeared during the wait.
2349			 */
2350			return;
2351		}
2352	}
2353
2354	if (m->cow == 0) {
2355		/*
2356		 * Check to see whether we raced with a transmit completion
2357		 * while waiting to allocate a page.  If so, put things back
2358		 * the way they were.
2359		 */
2360		vm_page_unlock(m);
2361		vm_page_lock(mnew);
2362		vm_page_free(mnew);
2363		vm_page_unlock(mnew);
2364		vm_page_insert(m, object, pindex);
2365	} else { /* clear COW & copy page */
2366		if (!so_zerocp_fullpage)
2367			pmap_copy_page(m, mnew);
2368		mnew->valid = VM_PAGE_BITS_ALL;
2369		vm_page_dirty(mnew);
2370		mnew->wire_count = m->wire_count - m->cow;
2371		m->wire_count = m->cow;
2372		vm_page_unlock(m);
2373	}
2374}
2375
2376void
2377vm_page_cowclear(vm_page_t m)
2378{
2379
2380	vm_page_lock_assert(m, MA_OWNED);
2381	if (m->cow) {
2382		m->cow--;
2383		/*
2384		 * Let vm_fault() add back write permission lazily.
2385		 */
2386	}
2387	/*
2388	 * sf_buf_free() will free the page, so we needn't do it here.
2389	 */
2390}
2391
2392int
2393vm_page_cowsetup(vm_page_t m)
2394{
2395
2396	vm_page_lock_assert(m, MA_OWNED);
2397	if ((m->flags & (PG_FICTITIOUS | PG_UNMANAGED)) != 0 ||
2398	    m->cow == USHRT_MAX - 1 || !VM_OBJECT_TRYLOCK(m->object))
2399		return (EBUSY);
2400	m->cow++;
2401	pmap_remove_write(m);
2402	VM_OBJECT_UNLOCK(m->object);
2403	return (0);
2404}
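/*
 * A sketch of how a zero-copy transmit path might pair these hooks (the
 * caller shown is hypothetical and serves only to illustrate the
 * protocol):
 *
 *	vm_page_lock(m);
 *	error = vm_page_cowsetup(m);	(write-protects m, bumps m->cow)
 *	vm_page_unlock(m);
 *	if (error == 0) {
 *		... hand the page to the transmit path; should anything
 *		write-fault on it in the meantime, vm_page_cowfault()
 *		substitutes a copy ...
 *		vm_page_lock(m);
 *		vm_page_cowclear(m);	(transmit complete, drops m->cow)
 *		vm_page_unlock(m);
 *	}
 */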
2405
2406#include "opt_ddb.h"
2407#ifdef DDB
2408#include <sys/kernel.h>
2409
2410#include <ddb/ddb.h>
2411
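/*
 * The commands below are reached from the DDB prompt as "show page"
 * and "show pageq", respectively.
 */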
2412DB_SHOW_COMMAND(page, vm_page_print_page_info)
2413{
2414	db_printf("cnt.v_free_count: %d\n", cnt.v_free_count);
2415	db_printf("cnt.v_cache_count: %d\n", cnt.v_cache_count);
2416	db_printf("cnt.v_inactive_count: %d\n", cnt.v_inactive_count);
2417	db_printf("cnt.v_active_count: %d\n", cnt.v_active_count);
2418	db_printf("cnt.v_wire_count: %d\n", cnt.v_wire_count);
2419	db_printf("cnt.v_free_reserved: %d\n", cnt.v_free_reserved);
2420	db_printf("cnt.v_free_min: %d\n", cnt.v_free_min);
2421	db_printf("cnt.v_free_target: %d\n", cnt.v_free_target);
2422	db_printf("cnt.v_cache_min: %d\n", cnt.v_cache_min);
2423	db_printf("cnt.v_inactive_target: %d\n", cnt.v_inactive_target);
2424}
2425
2426DB_SHOW_COMMAND(pageq, vm_page_print_pageq_info)
2427{
2428
2429	db_printf("PQ_FREE:");
2430	db_printf(" %d", cnt.v_free_count);
2431	db_printf("\n");
2432
2433	db_printf("PQ_CACHE:");
2434	db_printf(" %d", cnt.v_cache_count);
2435	db_printf("\n");
2436
2437	db_printf("PQ_ACTIVE: %d, PQ_INACTIVE: %d\n",
2438		*vm_page_queues[PQ_ACTIVE].cnt,
2439		*vm_page_queues[PQ_INACTIVE].cnt);
2440}
2441#endif /* DDB */
2442