1/*-
2 * Copyright (c) 1991 Regents of the University of California.
3 * All rights reserved.
4 * Copyright (c) 1998 Matthew Dillon.  All Rights Reserved.
5 *
6 * This code is derived from software contributed to Berkeley by
7 * The Mach Operating System project at Carnegie-Mellon University.
8 *
9 * Redistribution and use in source and binary forms, with or without
10 * modification, are permitted provided that the following conditions
11 * are met:
12 * 1. Redistributions of source code must retain the above copyright
13 *    notice, this list of conditions and the following disclaimer.
14 * 2. Redistributions in binary form must reproduce the above copyright
15 *    notice, this list of conditions and the following disclaimer in the
16 *    documentation and/or other materials provided with the distribution.
17 * 4. Neither the name of the University nor the names of its contributors
18 *    may be used to endorse or promote products derived from this software
19 *    without specific prior written permission.
20 *
21 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
22 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
23 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
24 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
25 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
26 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
27 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
28 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
29 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
30 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
31 * SUCH DAMAGE.
32 *
33 *	from: @(#)vm_page.c	7.4 (Berkeley) 5/7/91
34 */
35
36/*-
37 * Copyright (c) 1987, 1990 Carnegie-Mellon University.
38 * All rights reserved.
39 *
40 * Authors: Avadis Tevanian, Jr., Michael Wayne Young
41 *
42 * Permission to use, copy, modify and distribute this software and
43 * its documentation is hereby granted, provided that both the copyright
44 * notice and this permission notice appear in all copies of the
45 * software, derivative works or modified versions, and any portions
46 * thereof, and that both notices appear in supporting documentation.
47 *
48 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
49 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND
50 * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
51 *
52 * Carnegie Mellon requests users of this software to return to
53 *
54 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
55 *  School of Computer Science
56 *  Carnegie Mellon University
57 *  Pittsburgh PA 15213-3890
58 *
59 * any improvements or extensions that they make and grant Carnegie the
60 * rights to redistribute these changes.
61 */
62
63/*
64 *			GENERAL RULES ON VM_PAGE MANIPULATION
65 *
66 *	- a pageq mutex is required when adding or removing a page from a
67 *	  page queue (vm_page_queue[]), regardless of other mutexes or the
68 *	  busy state of a page.
69 *
70 *	- The object mutex is held when inserting or removing
71 *	  pages from an object (vm_page_insert() or vm_page_remove()).
72 *
73 */
74
75/*
76 *	Resident memory management module.
77 */
78
79#include <sys/cdefs.h>
80__FBSDID("$FreeBSD: head/sys/vm/vm_page.c 242402 2012-10-31 18:07:18Z attilio $");
81
82#include "opt_vm.h"
83
84#include <sys/param.h>
85#include <sys/systm.h>
86#include <sys/lock.h>
87#include <sys/kernel.h>
88#include <sys/limits.h>
89#include <sys/malloc.h>
90#include <sys/msgbuf.h>
91#include <sys/mutex.h>
92#include <sys/proc.h>
93#include <sys/sysctl.h>
94#include <sys/vmmeter.h>
95#include <sys/vnode.h>
96
97#include <vm/vm.h>
98#include <vm/pmap.h>
99#include <vm/vm_param.h>
100#include <vm/vm_kern.h>
101#include <vm/vm_object.h>
102#include <vm/vm_page.h>
103#include <vm/vm_pageout.h>
104#include <vm/vm_pager.h>
105#include <vm/vm_phys.h>
106#include <vm/vm_reserv.h>
107#include <vm/vm_extern.h>
108#include <vm/uma.h>
109#include <vm/uma_int.h>
110
111#include <machine/md_var.h>
112
113/*
114 *	Associated with each page of user-allocatable memory is a
115 *	page structure.
116 */
117
118struct vpgqueues vm_page_queues[PQ_COUNT];
119struct mtx_padalign vm_page_queue_mtx;
120struct mtx_padalign vm_page_queue_free_mtx;
121
122struct mtx_padalign pa_lock[PA_LOCK_COUNT];
123
124vm_page_t vm_page_array;
125long vm_page_array_size;
126long first_page;
127int vm_page_zero_count;
128
129static int boot_pages = UMA_BOOT_PAGES;
130TUNABLE_INT("vm.boot_pages", &boot_pages);
131SYSCTL_INT(_vm, OID_AUTO, boot_pages, CTLFLAG_RD, &boot_pages, 0,
132	"number of pages allocated for bootstrapping the VM system");
133
134static int pa_tryrelock_restart;
135SYSCTL_INT(_vm, OID_AUTO, tryrelock_restart, CTLFLAG_RD,
136    &pa_tryrelock_restart, 0, "Number of tryrelock restarts");
137
138static uma_zone_t fakepg_zone;
139
140static struct vnode *vm_page_alloc_init(vm_page_t m);
141static void vm_page_clear_dirty_mask(vm_page_t m, vm_page_bits_t pagebits);
142static void vm_page_queue_remove(int queue, vm_page_t m);
143static void vm_page_enqueue(int queue, vm_page_t m);
144static void vm_page_init_fakepg(void *dummy);
145
146SYSINIT(vm_page, SI_SUB_VM, SI_ORDER_SECOND, vm_page_init_fakepg, NULL);
147
148static void
149vm_page_init_fakepg(void *dummy)
150{
151
152	fakepg_zone = uma_zcreate("fakepg", sizeof(struct vm_page), NULL, NULL,
153	    NULL, NULL, UMA_ALIGN_PTR, UMA_ZONE_NOFREE | UMA_ZONE_VM);
154}
155
156/* Make sure that u_long is at least 64 bits when PAGE_SIZE is 32K. */
157#if PAGE_SIZE == 32768
158#ifdef CTASSERT
159CTASSERT(sizeof(u_long) >= 8);
160#endif
161#endif
162
163/*
164 * Try to acquire a physical address lock while a pmap is locked.  If we
165 * fail to trylock we unlock and lock the pmap directly and cache the
166 * locked pa in *locked.  The caller should then restart their loop in case
167 * the virtual to physical mapping has changed.
168 */
169int
170vm_page_pa_tryrelock(pmap_t pmap, vm_paddr_t pa, vm_paddr_t *locked)
171{
172	vm_paddr_t lockpa;
173
174	lockpa = *locked;
175	*locked = pa;
176	if (lockpa) {
177		PA_LOCK_ASSERT(lockpa, MA_OWNED);
178		if (PA_LOCKPTR(pa) == PA_LOCKPTR(lockpa))
179			return (0);
180		PA_UNLOCK(lockpa);
181	}
182	if (PA_TRYLOCK(pa))
183		return (0);
184	PMAP_UNLOCK(pmap);
185	atomic_add_int(&pa_tryrelock_restart, 1);
186	PA_LOCK(pa);
187	PMAP_LOCK(pmap);
188	return (EAGAIN);
189}
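
/*
 * Illustrative sketch (an editorial addition, excluded from the build by
 * "#if 0"): the retry pattern that a pmap-layer caller of
 * vm_page_pa_tryrelock() is expected to use.  The names "pmap", "va" and
 * "example_pmap_lookup" are hypothetical placeholders for the caller's
 * machine-dependent context.
 */
#if 0
	vm_paddr_t locked_pa, pa;
	vm_page_t m;

	locked_pa = 0;
	m = NULL;
	PMAP_LOCK(pmap);
retry:
	pa = example_pmap_lookup(pmap, va);	/* hypothetical MD lookup */
	if (pa != 0) {
		if (vm_page_pa_tryrelock(pmap, pa, &locked_pa))
			goto retry;	/* the translation may have changed */
		/* Both the pmap lock and the page lock for "pa" are held. */
		m = PHYS_TO_VM_PAGE(pa);
		vm_page_hold(m);
	}
	PA_UNLOCK_COND(locked_pa);
	PMAP_UNLOCK(pmap);
#endif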
190
191/*
192 *	vm_set_page_size:
193 *
194 *	Sets the page size, perhaps based upon the memory
195 *	size.  Must be called before any use of page-size
196 *	dependent functions.
197 */
198void
199vm_set_page_size(void)
200{
201	if (cnt.v_page_size == 0)
202		cnt.v_page_size = PAGE_SIZE;
203	if (((cnt.v_page_size - 1) & cnt.v_page_size) != 0)
204		panic("vm_set_page_size: page size not a power of two");
205}
206
207/*
208 *	vm_page_blacklist_lookup:
209 *
210 *	See if a physical address in this page has been listed
211 *	in the blacklist tunable.  Entries in the tunable are
212 *	separated by spaces or commas.  If an invalid integer is
213 *	encountered then the rest of the string is skipped.
214 */
215static int
216vm_page_blacklist_lookup(char *list, vm_paddr_t pa)
217{
218	vm_paddr_t bad;
219	char *cp, *pos;
220
221	for (pos = list; *pos != '\0'; pos = cp) {
222		bad = strtoq(pos, &cp, 0);
223		if (*cp != '\0') {
224			if (*cp == ' ' || *cp == ',') {
225				cp++;
226				if (cp == pos)
227					continue;
228			} else
229				break;
230		}
231		if (pa == trunc_page(bad))
232			return (1);
233	}
234	return (0);
235}
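
/*
 * Example (an editorial addition, not from the original file): the
 * blacklist tunable is normally set in loader.conf(5); the two physical
 * addresses below are made up.
 *
 *	vm.blacklist="0x7fe9000 0x1a403000"
 *
 * Entries may equally be separated by commas instead of spaces.
 */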
236
237/*
238 *	vm_page_startup:
239 *
240 *	Initializes the resident memory module.
241 *
242 *	Allocates memory for the page cells, and
243 *	for the object/offset-to-page hash table headers.
244 *	Each page cell is initialized and placed on the free list.
245 */
246vm_offset_t
247vm_page_startup(vm_offset_t vaddr)
248{
249	vm_offset_t mapped;
250	vm_paddr_t page_range;
251	vm_paddr_t new_end;
252	int i;
253	vm_paddr_t pa;
254	vm_paddr_t last_pa;
255	char *list;
256
257	/* the biggest memory array is the second group of pages */
258	vm_paddr_t end;
259	vm_paddr_t biggestsize;
260	vm_paddr_t low_water, high_water;
261	int biggestone;
262
263	biggestsize = 0;
264	biggestone = 0;
265	vaddr = round_page(vaddr);
266
267	for (i = 0; phys_avail[i + 1]; i += 2) {
268		phys_avail[i] = round_page(phys_avail[i]);
269		phys_avail[i + 1] = trunc_page(phys_avail[i + 1]);
270	}
271
272	low_water = phys_avail[0];
273	high_water = phys_avail[1];
274
275	for (i = 0; phys_avail[i + 1]; i += 2) {
276		vm_paddr_t size = phys_avail[i + 1] - phys_avail[i];
277
278		if (size > biggestsize) {
279			biggestone = i;
280			biggestsize = size;
281		}
282		if (phys_avail[i] < low_water)
283			low_water = phys_avail[i];
284		if (phys_avail[i + 1] > high_water)
285			high_water = phys_avail[i + 1];
286	}
287
288#ifdef XEN
289	low_water = 0;
290#endif
291
292	end = phys_avail[biggestone+1];
293
294	/*
295	 * Initialize the page and queue locks.
296	 */
297	mtx_init(&vm_page_queue_mtx, "vm page queue", NULL, MTX_DEF |
298	    MTX_RECURSE);
299	mtx_init(&vm_page_queue_free_mtx, "vm page free queue", NULL, MTX_DEF);
300	for (i = 0; i < PA_LOCK_COUNT; i++)
301		mtx_init(&pa_lock[i], "vm page", NULL, MTX_DEF);
302
303	/*
304	 * Initialize the queue headers for the hold queue, the active queue,
305	 * and the inactive queue.
306	 */
307	for (i = 0; i < PQ_COUNT; i++)
308		TAILQ_INIT(&vm_page_queues[i].pl);
309	vm_page_queues[PQ_INACTIVE].cnt = &cnt.v_inactive_count;
310	vm_page_queues[PQ_ACTIVE].cnt = &cnt.v_active_count;
311
312	/*
313	 * Allocate memory for use when boot strapping the kernel memory
314	 * allocator.
315	 */
316	new_end = end - (boot_pages * UMA_SLAB_SIZE);
317	new_end = trunc_page(new_end);
318	mapped = pmap_map(&vaddr, new_end, end,
319	    VM_PROT_READ | VM_PROT_WRITE);
320	bzero((void *)mapped, end - new_end);
321	uma_startup((void *)mapped, boot_pages);
322
323#if defined(__amd64__) || defined(__i386__) || defined(__arm__) || \
324    defined(__mips__)
325	/*
326	 * Allocate a bitmap to indicate that a random physical page
327	 * needs to be included in a minidump.
328	 *
329	 * The amd64 port needs this to indicate which direct map pages
330	 * need to be dumped, via calls to dump_add_page()/dump_drop_page().
331	 *
332	 * However, i386 still needs this workspace internally within the
333	 * minidump code.  In theory, they are not needed on i386, but are
334	 * included should the sf_buf code decide to use them.
335	 */
336	last_pa = 0;
337	for (i = 0; dump_avail[i + 1] != 0; i += 2)
338		if (dump_avail[i + 1] > last_pa)
339			last_pa = dump_avail[i + 1];
340	page_range = last_pa / PAGE_SIZE;
341	vm_page_dump_size = round_page(roundup2(page_range, NBBY) / NBBY);
342	new_end -= vm_page_dump_size;
343	vm_page_dump = (void *)(uintptr_t)pmap_map(&vaddr, new_end,
344	    new_end + vm_page_dump_size, VM_PROT_READ | VM_PROT_WRITE);
345	bzero((void *)vm_page_dump, vm_page_dump_size);
346#endif
347#ifdef __amd64__
348	/*
349	 * Request that the physical pages underlying the message buffer be
350	 * included in a crash dump.  Since the message buffer is accessed
351	 * through the direct map, they are not automatically included.
352	 */
353	pa = DMAP_TO_PHYS((vm_offset_t)msgbufp->msg_ptr);
354	last_pa = pa + round_page(msgbufsize);
355	while (pa < last_pa) {
356		dump_add_page(pa);
357		pa += PAGE_SIZE;
358	}
359#endif
360	/*
361	 * Compute the number of pages of memory that will be available for
362	 * use (taking into account the overhead of a page structure per
363	 * page).
364	 */
365	first_page = low_water / PAGE_SIZE;
366#ifdef VM_PHYSSEG_SPARSE
367	page_range = 0;
368	for (i = 0; phys_avail[i + 1] != 0; i += 2)
369		page_range += atop(phys_avail[i + 1] - phys_avail[i]);
370#elif defined(VM_PHYSSEG_DENSE)
371	page_range = high_water / PAGE_SIZE - first_page;
372#else
373#error "Either VM_PHYSSEG_DENSE or VM_PHYSSEG_SPARSE must be defined."
374#endif
375	end = new_end;
376
377	/*
378	 * Reserve an unmapped guard page to trap access to vm_page_array[-1].
379	 */
380	vaddr += PAGE_SIZE;
381
382	/*
383	 * Initialize the mem entry structures now, and put them in the free
384	 * queue.
385	 */
386	new_end = trunc_page(end - page_range * sizeof(struct vm_page));
387	mapped = pmap_map(&vaddr, new_end, end,
388	    VM_PROT_READ | VM_PROT_WRITE);
389	vm_page_array = (vm_page_t) mapped;
390#if VM_NRESERVLEVEL > 0
391	/*
392	 * Allocate memory for the reservation management system's data
393	 * structures.
394	 */
395	new_end = vm_reserv_startup(&vaddr, new_end, high_water);
396#endif
397#if defined(__amd64__) || defined(__mips__)
398	/*
399	 * pmap_map on amd64 and mips can come out of the direct-map, not kvm
400	 * like i386, so the pages must be tracked for a crashdump to include
401	 * this data.  This includes the vm_page_array and the early UMA
402	 * bootstrap pages.
403	 */
404	for (pa = new_end; pa < phys_avail[biggestone + 1]; pa += PAGE_SIZE)
405		dump_add_page(pa);
406#endif
407	phys_avail[biggestone + 1] = new_end;
408
409	/*
410	 * Clear all of the page structures
411	 */
412	bzero((caddr_t) vm_page_array, page_range * sizeof(struct vm_page));
413	for (i = 0; i < page_range; i++)
414		vm_page_array[i].order = VM_NFREEORDER;
415	vm_page_array_size = page_range;
416
417	/*
418	 * Initialize the physical memory allocator.
419	 */
420	vm_phys_init();
421
422	/*
423	 * Add every available physical page that is not blacklisted to
424	 * the free lists.
425	 */
426	cnt.v_page_count = 0;
427	cnt.v_free_count = 0;
428	list = getenv("vm.blacklist");
429	for (i = 0; phys_avail[i + 1] != 0; i += 2) {
430		pa = phys_avail[i];
431		last_pa = phys_avail[i + 1];
432		while (pa < last_pa) {
433			if (list != NULL &&
434			    vm_page_blacklist_lookup(list, pa))
435				printf("Skipping page with pa 0x%jx\n",
436				    (uintmax_t)pa);
437			else
438				vm_phys_add_page(pa);
439			pa += PAGE_SIZE;
440		}
441	}
442	freeenv(list);
443#if VM_NRESERVLEVEL > 0
444	/*
445	 * Initialize the reservation management system.
446	 */
447	vm_reserv_init();
448#endif
449	return (vaddr);
450}
451
452void
453vm_page_reference(vm_page_t m)
454{
455
456	vm_page_aflag_set(m, PGA_REFERENCED);
457}
458
459void
460vm_page_busy(vm_page_t m)
461{
462
463	VM_OBJECT_LOCK_ASSERT(m->object, MA_OWNED);
464	KASSERT((m->oflags & VPO_BUSY) == 0,
465	    ("vm_page_busy: page already busy!!!"));
466	m->oflags |= VPO_BUSY;
467}
468
469/*
470 *      vm_page_flash:
471 *
472 *      wakeup anyone waiting for the page.
473 */
474void
475vm_page_flash(vm_page_t m)
476{
477
478	VM_OBJECT_LOCK_ASSERT(m->object, MA_OWNED);
479	if (m->oflags & VPO_WANTED) {
480		m->oflags &= ~VPO_WANTED;
481		wakeup(m);
482	}
483}
484
485/*
486 *      vm_page_wakeup:
487 *
488 *      clear the VPO_BUSY flag and wakeup anyone waiting for the
489 *      page.
490 *
491 */
492void
493vm_page_wakeup(vm_page_t m)
494{
495
496	VM_OBJECT_LOCK_ASSERT(m->object, MA_OWNED);
497	KASSERT(m->oflags & VPO_BUSY, ("vm_page_wakeup: page not busy!!!"));
498	m->oflags &= ~VPO_BUSY;
499	vm_page_flash(m);
500}
501
502void
503vm_page_io_start(vm_page_t m)
504{
505
506	VM_OBJECT_LOCK_ASSERT(m->object, MA_OWNED);
507	m->busy++;
508}
509
510void
511vm_page_io_finish(vm_page_t m)
512{
513
514	VM_OBJECT_LOCK_ASSERT(m->object, MA_OWNED);
515	KASSERT(m->busy > 0, ("vm_page_io_finish: page %p is not busy", m));
516	m->busy--;
517	if (m->busy == 0)
518		vm_page_flash(m);
519}
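
/*
 * Illustrative sketch (an editorial addition, excluded from the build by
 * "#if 0"): the paired use of vm_page_io_start() and vm_page_io_finish()
 * around an asynchronous pager operation.  Error handling is omitted and
 * "object"/"m" stand for the caller's object and one of its pages.
 */
#if 0
	VM_OBJECT_LOCK(object);
	vm_page_io_start(m);	/* account one more in-flight I/O on "m" */
	VM_OBJECT_UNLOCK(object);

	/* ... start the I/O; the completion path later runs: ... */

	VM_OBJECT_LOCK(object);
	vm_page_io_finish(m);	/* wakes sleepers once m->busy reaches 0 */
	VM_OBJECT_UNLOCK(object);
#endif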
520
521/*
522 * Keep the page from being freed by the page daemon.  This has much
523 * the same effect as wiring, except with much lower overhead, and
524 * should be used only for *very* temporary
525 * holding ("wiring").
526 */
527void
528vm_page_hold(vm_page_t mem)
529{
530
531	vm_page_lock_assert(mem, MA_OWNED);
532        mem->hold_count++;
533}
534
535void
536vm_page_unhold(vm_page_t mem)
537{
538
539	vm_page_lock_assert(mem, MA_OWNED);
540	--mem->hold_count;
541	KASSERT(mem->hold_count >= 0, ("vm_page_unhold: hold count < 0!!!"));
542	if (mem->hold_count == 0 && (mem->flags & PG_UNHOLDFREE) != 0)
543		vm_page_free_toq(mem);
544}
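
/*
 * Illustrative sketch (an editorial addition, excluded from the build by
 * "#if 0"): holding a page across a short operation, as described above;
 * "m" is a hypothetical page that the caller has already looked up.
 */
#if 0
	vm_page_lock(m);
	vm_page_hold(m);	/* keep "m" from being freed */
	vm_page_unlock(m);

	/* ... brief operation that must not lose the page ... */

	vm_page_lock(m);
	vm_page_unhold(m);	/* may free "m" if PG_UNHOLDFREE was set */
	vm_page_unlock(m);
#endif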
545
546/*
547 *	vm_page_unhold_pages:
548 *
549 *	Unhold each of the pages that is referenced by the given array.
550 */
551void
552vm_page_unhold_pages(vm_page_t *ma, int count)
553{
554	struct mtx *mtx, *new_mtx;
555
556	mtx = NULL;
557	for (; count != 0; count--) {
558		/*
559		 * Avoid releasing and reacquiring the same page lock.
560		 */
561		new_mtx = vm_page_lockptr(*ma);
562		if (mtx != new_mtx) {
563			if (mtx != NULL)
564				mtx_unlock(mtx);
565			mtx = new_mtx;
566			mtx_lock(mtx);
567		}
568		vm_page_unhold(*ma);
569		ma++;
570	}
571	if (mtx != NULL)
572		mtx_unlock(mtx);
573}
574
575vm_page_t
576PHYS_TO_VM_PAGE(vm_paddr_t pa)
577{
578	vm_page_t m;
579
580#ifdef VM_PHYSSEG_SPARSE
581	m = vm_phys_paddr_to_vm_page(pa);
582	if (m == NULL)
583		m = vm_phys_fictitious_to_vm_page(pa);
584	return (m);
585#elif defined(VM_PHYSSEG_DENSE)
586	long pi;
587
588	pi = atop(pa);
589	if (pi >= first_page && (pi - first_page) < vm_page_array_size) {
590		m = &vm_page_array[pi - first_page];
591		return (m);
592	}
593	return (vm_phys_fictitious_to_vm_page(pa));
594#else
595#error "Either VM_PHYSSEG_DENSE or VM_PHYSSEG_SPARSE must be defined."
596#endif
597}
598
599/*
600 *	vm_page_getfake:
601 *
602 *	Create a fictitious page with the specified physical address and
603 *	memory attribute.  The memory attribute is the only machine-
604 *	dependent aspect of a fictitious page that must be initialized.
605 */
606vm_page_t
607vm_page_getfake(vm_paddr_t paddr, vm_memattr_t memattr)
608{
609	vm_page_t m;
610
611	m = uma_zalloc(fakepg_zone, M_WAITOK | M_ZERO);
612	vm_page_initfake(m, paddr, memattr);
613	return (m);
614}
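
/*
 * Illustrative sketch (an editorial addition, excluded from the build by
 * "#if 0"): a device-pager style use of a fictitious page that covers a
 * device memory window.  "BAR_PADDR" is a made-up physical address and the
 * appropriate memory attribute is device specific.
 */
#if 0
	vm_page_t m;

	m = vm_page_getfake(BAR_PADDR, VM_MEMATTR_DEFAULT);
	/* ... hand "m" to the fault handler or insert it into an object ... */
	vm_page_putfake(m);
#endif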
615
616void
617vm_page_initfake(vm_page_t m, vm_paddr_t paddr, vm_memattr_t memattr)
618{
619
620	if ((m->flags & PG_FICTITIOUS) != 0) {
621		/*
622		 * The page's memattr might have changed since the
623		 * previous initialization.  Update the pmap to the
624		 * new memattr.
625		 */
626		goto memattr;
627	}
628	m->phys_addr = paddr;
629	m->queue = PQ_NONE;
630	/* Fictitious pages don't use "segind". */
631	m->flags = PG_FICTITIOUS;
632	/* Fictitious pages don't use "order" or "pool". */
633	m->oflags = VPO_BUSY | VPO_UNMANAGED;
634	m->wire_count = 1;
635memattr:
636	pmap_page_set_memattr(m, memattr);
637}
638
639/*
640 *	vm_page_putfake:
641 *
642 *	Release a fictitious page.
643 */
644void
645vm_page_putfake(vm_page_t m)
646{
647
648	KASSERT((m->oflags & VPO_UNMANAGED) != 0, ("managed %p", m));
649	KASSERT((m->flags & PG_FICTITIOUS) != 0,
650	    ("vm_page_putfake: bad page %p", m));
651	uma_zfree(fakepg_zone, m);
652}
653
654/*
655 *	vm_page_updatefake:
656 *
657 *	Update the given fictitious page to the specified physical address and
658 *	memory attribute.
659 */
660void
661vm_page_updatefake(vm_page_t m, vm_paddr_t paddr, vm_memattr_t memattr)
662{
663
664	KASSERT((m->flags & PG_FICTITIOUS) != 0,
665	    ("vm_page_updatefake: bad page %p", m));
666	m->phys_addr = paddr;
667	pmap_page_set_memattr(m, memattr);
668}
669
670/*
671 *	vm_page_free:
672 *
673 *	Free a page.
674 */
675void
676vm_page_free(vm_page_t m)
677{
678
679	m->flags &= ~PG_ZERO;
680	vm_page_free_toq(m);
681}
682
683/*
684 *	vm_page_free_zero:
685 *
686 *	Free a page to the zerod-pages queue
687 *	Free a page to the zeroed-pages queue
688void
689vm_page_free_zero(vm_page_t m)
690{
691
692	m->flags |= PG_ZERO;
693	vm_page_free_toq(m);
694}
695
696/*
697 * Unbusy and handle the page queueing for a page from the VOP_GETPAGES()
698 * array which is not the requested page.
699 */
700void
701vm_page_readahead_finish(vm_page_t m)
702{
703
704	if (m->valid != 0) {
705		/*
706		 * Since the page is not the requested page, whether
707		 * it should be activated or deactivated is not
708		 * obvious.  Empirical results have shown that
709		 * deactivating the page is usually the best choice,
710		 * unless the page is wanted by another thread.
711		 */
712		if (m->oflags & VPO_WANTED) {
713			vm_page_lock(m);
714			vm_page_activate(m);
715			vm_page_unlock(m);
716		} else {
717			vm_page_lock(m);
718			vm_page_deactivate(m);
719			vm_page_unlock(m);
720		}
721		vm_page_wakeup(m);
722	} else {
723		/*
724		 * Free the completely invalid page.  Such a page state
725		 * occurs when the short read operation did not cover
726		 * our page at all, or when a read error
727		 * happens.
728		 */
729		vm_page_lock(m);
730		vm_page_free(m);
731		vm_page_unlock(m);
732	}
733}
734
735/*
736 *	vm_page_sleep:
737 *
738 *	Sleep and release the page lock.
739 *
740 *	The object containing the given page must be locked.
741 */
742void
743vm_page_sleep(vm_page_t m, const char *msg)
744{
745
746	VM_OBJECT_LOCK_ASSERT(m->object, MA_OWNED);
747	if (mtx_owned(vm_page_lockptr(m)))
748		vm_page_unlock(m);
749
750	/*
751	 * It's possible that while we sleep, the page will get
752	 * unbusied and freed.  If we are holding the object
753	 * lock, we will assume we hold a reference to the object
754	 * such that even if m->object changes, we can re-lock
755	 * it.
756	 */
757	m->oflags |= VPO_WANTED;
758	msleep(m, VM_OBJECT_MTX(m->object), PVM, msg, 0);
759}
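
/*
 * Illustrative sketch (an editorial addition, excluded from the build by
 * "#if 0"): the usual "wait until the page is no longer busy" loop built
 * on vm_page_sleep().  Because msleep() releases and reacquires the object
 * lock, the page may have been freed or replaced, so the lookup is redone;
 * "object", "pindex" and the wait message are hypothetical.
 */
#if 0
retry:
	m = vm_page_lookup(object, pindex);
	if (m != NULL && (m->oflags & VPO_BUSY) != 0) {
		vm_page_sleep(m, "pgwait");
		goto retry;
	}
#endif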
760
761/*
762 *	vm_page_dirty_KBI:		[ internal use only ]
763 *
764 *	Set all bits in the page's dirty field.
765 *
766 *	The object containing the specified page must be locked if the
767 *	call is made from the machine-independent layer.
768 *
769 *	See vm_page_clear_dirty_mask().
770 *
771 *	This function should only be called by vm_page_dirty().
772 */
773void
774vm_page_dirty_KBI(vm_page_t m)
775{
776
777	/* These assertions refer to this operation by its public name. */
778	KASSERT((m->flags & PG_CACHED) == 0,
779	    ("vm_page_dirty: page in cache!"));
780	KASSERT(!VM_PAGE_IS_FREE(m),
781	    ("vm_page_dirty: page is free!"));
782	KASSERT(m->valid == VM_PAGE_BITS_ALL,
783	    ("vm_page_dirty: page is invalid!"));
784	m->dirty = VM_PAGE_BITS_ALL;
785}
786
787/*
788 *	vm_page_splay:
789 *
790 *	Implements Sleator and Tarjan's top-down splay algorithm.  Returns
791 *	the vm_page containing the given pindex.  If, however, that
792 *	pindex is not found in the vm_object, returns a vm_page that is
793 *	adjacent to the pindex, coming before or after it.
794 */
795vm_page_t
796vm_page_splay(vm_pindex_t pindex, vm_page_t root)
797{
798	struct vm_page dummy;
799	vm_page_t lefttreemax, righttreemin, y;
800
801	if (root == NULL)
802		return (root);
803	lefttreemax = righttreemin = &dummy;
804	for (;; root = y) {
805		if (pindex < root->pindex) {
806			if ((y = root->left) == NULL)
807				break;
808			if (pindex < y->pindex) {
809				/* Rotate right. */
810				root->left = y->right;
811				y->right = root;
812				root = y;
813				if ((y = root->left) == NULL)
814					break;
815			}
816			/* Link into the new root's right tree. */
817			righttreemin->left = root;
818			righttreemin = root;
819		} else if (pindex > root->pindex) {
820			if ((y = root->right) == NULL)
821				break;
822			if (pindex > y->pindex) {
823				/* Rotate left. */
824				root->right = y->left;
825				y->left = root;
826				root = y;
827				if ((y = root->right) == NULL)
828					break;
829			}
830			/* Link into the new root's left tree. */
831			lefttreemax->right = root;
832			lefttreemax = root;
833		} else
834			break;
835	}
836	/* Assemble the new root. */
837	lefttreemax->right = root->left;
838	righttreemin->left = root->right;
839	root->left = dummy.right;
840	root->right = dummy.left;
841	return (root);
842}
843
844/*
845 *	vm_page_insert:		[ internal use only ]
846 *
847 *	Inserts the given mem entry into the object and object list.
848 *
849 *	The page tables are not updated, but the page will presumably be
850 *	faulted in if necessary, or, for a kernel page, the caller will at some
851 *	point enter the page into the kernel's pmap.  We are not allowed to sleep
852 *	here so we *can't* do this anyway.
853 *
854 *	The object must be locked.
855 */
856void
857vm_page_insert(vm_page_t m, vm_object_t object, vm_pindex_t pindex)
858{
859	vm_page_t root;
860
861	VM_OBJECT_LOCK_ASSERT(object, MA_OWNED);
862	if (m->object != NULL)
863		panic("vm_page_insert: page already inserted");
864
865	/*
866	 * Record the object/offset pair in this page
867	 */
868	m->object = object;
869	m->pindex = pindex;
870
871	/*
872	 * Now link into the object's ordered list of backed pages.
873	 */
874	root = object->root;
875	if (root == NULL) {
876		m->left = NULL;
877		m->right = NULL;
878		TAILQ_INSERT_TAIL(&object->memq, m, listq);
879	} else {
880		root = vm_page_splay(pindex, root);
881		if (pindex < root->pindex) {
882			m->left = root->left;
883			m->right = root;
884			root->left = NULL;
885			TAILQ_INSERT_BEFORE(root, m, listq);
886		} else if (pindex == root->pindex)
887			panic("vm_page_insert: offset already allocated");
888		else {
889			m->right = root->right;
890			m->left = root;
891			root->right = NULL;
892			TAILQ_INSERT_AFTER(&object->memq, root, m, listq);
893		}
894	}
895	object->root = m;
896
897	/*
898	 * Show that the object has one more resident page.
899	 */
900	object->resident_page_count++;
901
902	/*
903	 * Hold the vnode until the last page is released.
904	 */
905	if (object->resident_page_count == 1 && object->type == OBJT_VNODE)
906		vhold(object->handle);
907
908	/*
909	 * Since we are inserting a new and possibly dirty page,
910	 * update the object's OBJ_MIGHTBEDIRTY flag.
911	 */
912	if (pmap_page_is_write_mapped(m))
913		vm_object_set_writeable_dirty(object);
914}
915
916/*
917 *	vm_page_remove:
918 *
919 *	Removes the given mem entry from the object/offset-page
920 *	table and the object page list, but does not invalidate/terminate
921 *	the backing store.
922 *
923 *	The underlying pmap entry (if any) is NOT removed here.
924 *
925 *	The object must be locked.  The page must be locked if it is managed.
926 */
927void
928vm_page_remove(vm_page_t m)
929{
930	vm_object_t object;
931	vm_page_t next, prev, root;
932
933	if ((m->oflags & VPO_UNMANAGED) == 0)
934		vm_page_lock_assert(m, MA_OWNED);
935	if ((object = m->object) == NULL)
936		return;
937	VM_OBJECT_LOCK_ASSERT(object, MA_OWNED);
938	if (m->oflags & VPO_BUSY) {
939		m->oflags &= ~VPO_BUSY;
940		vm_page_flash(m);
941	}
942
943	/*
944	 * Now remove from the object's list of backed pages.
945	 */
946	if ((next = TAILQ_NEXT(m, listq)) != NULL && next->left == m) {
947		/*
948		 * Since the page's successor in the list is also its parent
949		 * in the tree, its right subtree must be empty.
950		 */
951		next->left = m->left;
952		KASSERT(m->right == NULL,
953		    ("vm_page_remove: page %p has right child", m));
954	} else if ((prev = TAILQ_PREV(m, pglist, listq)) != NULL &&
955	    prev->right == m) {
956		/*
957		 * Since the page's predecessor in the list is also its parent
958		 * in the tree, its left subtree must be empty.
959		 */
960		KASSERT(m->left == NULL,
961		    ("vm_page_remove: page %p has left child", m));
962		prev->right = m->right;
963	} else {
964		if (m != object->root)
965			vm_page_splay(m->pindex, object->root);
966		if (m->left == NULL)
967			root = m->right;
968		else if (m->right == NULL)
969			root = m->left;
970		else {
971			/*
972			 * Move the page's successor to the root, because
973			 * pages are usually removed in ascending order.
974			 */
975			if (m->right != next)
976				vm_page_splay(m->pindex, m->right);
977			next->left = m->left;
978			root = next;
979		}
980		object->root = root;
981	}
982	TAILQ_REMOVE(&object->memq, m, listq);
983
984	/*
985	 * And show that the object has one fewer resident page.
986	 */
987	object->resident_page_count--;
988
989	/*
990	 * The vnode may now be recycled.
991	 */
992	if (object->resident_page_count == 0 && object->type == OBJT_VNODE)
993		vdrop(object->handle);
994
995	m->object = NULL;
996}
997
998/*
999 *	vm_page_lookup:
1000 *
1001 *	Returns the page associated with the object/offset
1002 *	pair specified; if none is found, NULL is returned.
1003 *
1004 *	The object must be locked.
1005 */
1006vm_page_t
1007vm_page_lookup(vm_object_t object, vm_pindex_t pindex)
1008{
1009	vm_page_t m;
1010
1011	VM_OBJECT_LOCK_ASSERT(object, MA_OWNED);
1012	if ((m = object->root) != NULL && m->pindex != pindex) {
1013		m = vm_page_splay(pindex, m);
1014		if ((object->root = m)->pindex != pindex)
1015			m = NULL;
1016	}
1017	return (m);
1018}
1019
1020/*
1021 *	vm_page_find_least:
1022 *
1023 *	Returns the page associated with the object with least pindex
1024 *	greater than or equal to the parameter pindex, or NULL.
1025 *
1026 *	The object must be locked.
1027 */
1028vm_page_t
1029vm_page_find_least(vm_object_t object, vm_pindex_t pindex)
1030{
1031	vm_page_t m;
1032
1033	VM_OBJECT_LOCK_ASSERT(object, MA_OWNED);
1034	if ((m = TAILQ_FIRST(&object->memq)) != NULL) {
1035		if (m->pindex < pindex) {
1036			m = vm_page_splay(pindex, object->root);
1037			if ((object->root = m)->pindex < pindex)
1038				m = TAILQ_NEXT(m, listq);
1039		}
1040	}
1041	return (m);
1042}
1043
1044/*
1045 * Returns the given page's successor (by pindex) within the object if it is
1046 * resident; if none is found, NULL is returned.
1047 *
1048 * The object must be locked.
1049 */
1050vm_page_t
1051vm_page_next(vm_page_t m)
1052{
1053	vm_page_t next;
1054
1055	VM_OBJECT_LOCK_ASSERT(m->object, MA_OWNED);
1056	if ((next = TAILQ_NEXT(m, listq)) != NULL &&
1057	    next->pindex != m->pindex + 1)
1058		next = NULL;
1059	return (next);
1060}
1061
1062/*
1063 * Returns the given page's predecessor (by pindex) within the object if it is
1064 * resident; if none is found, NULL is returned.
1065 *
1066 * The object must be locked.
1067 */
1068vm_page_t
1069vm_page_prev(vm_page_t m)
1070{
1071	vm_page_t prev;
1072
1073	VM_OBJECT_LOCK_ASSERT(m->object, MA_OWNED);
1074	if ((prev = TAILQ_PREV(m, pglist, listq)) != NULL &&
1075	    prev->pindex != m->pindex - 1)
1076		prev = NULL;
1077	return (prev);
1078}
1079
1080/*
1081 *	vm_page_rename:
1082 *
1083 *	Move the given memory entry from its
1084 *	current object to the specified target object/offset.
1085 *
1086 *	Note: swap associated with the page must be invalidated by the move.  We
1087 *	      have to do this for several reasons:  (1) we aren't freeing the
1088 *	      page, (2) we are dirtying the page, (3) the VM system is probably
1089 *	      moving the page from object A to B, and will then later move
1090 *	      the backing store from A to B and we can't have a conflict.
1091 *
1092 *	Note: we *always* dirty the page.  It is necessary both for the
1093 *	      fact that we moved it, and because we may be invalidating
1094 *	      swap.  If the page is on the cache, we have to deactivate it
1095 *	      or vm_page_dirty() will panic.  Dirty pages are not allowed
1096 *	      on the cache.
1097 *
1098 *	The objects must be locked.  The page must be locked if it is managed.
1099 */
1100void
1101vm_page_rename(vm_page_t m, vm_object_t new_object, vm_pindex_t new_pindex)
1102{
1103
1104	vm_page_remove(m);
1105	vm_page_insert(m, new_object, new_pindex);
1106	vm_page_dirty(m);
1107}
1108
1109/*
1110 *	Convert all of the given object's cached pages that have a
1111 *	pindex within the given range into free pages.  If the value
1112 *	zero is given for "end", then the range's upper bound is
1113 *	infinity.  If the given object is backed by a vnode and it
1114 *	transitions from having one or more cached pages to none, the
1115 *	vnode's hold count is reduced.
1116 */
1117void
1118vm_page_cache_free(vm_object_t object, vm_pindex_t start, vm_pindex_t end)
1119{
1120	vm_page_t m, m_next;
1121	boolean_t empty;
1122
1123	mtx_lock(&vm_page_queue_free_mtx);
1124	if (__predict_false(object->cache == NULL)) {
1125		mtx_unlock(&vm_page_queue_free_mtx);
1126		return;
1127	}
1128	m = object->cache = vm_page_splay(start, object->cache);
1129	if (m->pindex < start) {
1130		if (m->right == NULL)
1131			m = NULL;
1132		else {
1133			m_next = vm_page_splay(start, m->right);
1134			m_next->left = m;
1135			m->right = NULL;
1136			m = object->cache = m_next;
1137		}
1138	}
1139
1140	/*
1141	 * At this point, "m" is either (1) a reference to the page
1142	 * with the least pindex that is greater than or equal to
1143	 * "start" or (2) NULL.
1144	 */
1145	for (; m != NULL && (m->pindex < end || end == 0); m = m_next) {
1146		/*
1147		 * Find "m"'s successor and remove "m" from the
1148		 * object's cache.
1149		 */
1150		if (m->right == NULL) {
1151			object->cache = m->left;
1152			m_next = NULL;
1153		} else {
1154			m_next = vm_page_splay(start, m->right);
1155			m_next->left = m->left;
1156			object->cache = m_next;
1157		}
1158		/* Convert "m" to a free page. */
1159		m->object = NULL;
1160		m->valid = 0;
1161		/* Clear PG_CACHED and set PG_FREE. */
1162		m->flags ^= PG_CACHED | PG_FREE;
1163		KASSERT((m->flags & (PG_CACHED | PG_FREE)) == PG_FREE,
1164		    ("vm_page_cache_free: page %p has inconsistent flags", m));
1165		cnt.v_cache_count--;
1166		cnt.v_free_count++;
1167	}
1168	empty = object->cache == NULL;
1169	mtx_unlock(&vm_page_queue_free_mtx);
1170	if (object->type == OBJT_VNODE && empty)
1171		vdrop(object->handle);
1172}
1173
1174/*
1175 *	Returns the cached page that is associated with the given
1176 *	object and offset.  If, however, none exists, returns NULL.
1177 *
1178 *	The free page queue must be locked.
1179 */
1180static inline vm_page_t
1181vm_page_cache_lookup(vm_object_t object, vm_pindex_t pindex)
1182{
1183	vm_page_t m;
1184
1185	mtx_assert(&vm_page_queue_free_mtx, MA_OWNED);
1186	if ((m = object->cache) != NULL && m->pindex != pindex) {
1187		m = vm_page_splay(pindex, m);
1188		if ((object->cache = m)->pindex != pindex)
1189			m = NULL;
1190	}
1191	return (m);
1192}
1193
1194/*
1195 *	Remove the given cached page from its containing object's
1196 *	collection of cached pages.
1197 *
1198 *	The free page queue must be locked.
1199 */
1200static void
1201vm_page_cache_remove(vm_page_t m)
1202{
1203	vm_object_t object;
1204	vm_page_t root;
1205
1206	mtx_assert(&vm_page_queue_free_mtx, MA_OWNED);
1207	KASSERT((m->flags & PG_CACHED) != 0,
1208	    ("vm_page_cache_remove: page %p is not cached", m));
1209	object = m->object;
1210	if (m != object->cache) {
1211		root = vm_page_splay(m->pindex, object->cache);
1212		KASSERT(root == m,
1213		    ("vm_page_cache_remove: page %p is not cached in object %p",
1214		    m, object));
1215	}
1216	if (m->left == NULL)
1217		root = m->right;
1218	else if (m->right == NULL)
1219		root = m->left;
1220	else {
1221		root = vm_page_splay(m->pindex, m->left);
1222		root->right = m->right;
1223	}
1224	object->cache = root;
1225	m->object = NULL;
1226	cnt.v_cache_count--;
1227}
1228
1229/*
1230 *	Transfer all of the cached pages with offset greater than or
1231 *	equal to 'offidxstart' from the original object's cache to the
1232 *	new object's cache.  However, any cached pages with offset
1233 *	greater than or equal to the new object's size are kept in the
1234 *	original object.  Initially, the new object's cache must be
1235 *	empty.  Offset 'offidxstart' in the original object must
1236 *	correspond to offset zero in the new object.
1237 *
1238 *	The new object must be locked.
1239 */
1240void
1241vm_page_cache_transfer(vm_object_t orig_object, vm_pindex_t offidxstart,
1242    vm_object_t new_object)
1243{
1244	vm_page_t m, m_next;
1245
1246	/*
1247	 * Insertion into an object's collection of cached pages
1248	 * requires the object to be locked.  In contrast, removal does
1249	 * not.
1250	 */
1251	VM_OBJECT_LOCK_ASSERT(new_object, MA_OWNED);
1252	KASSERT(new_object->cache == NULL,
1253	    ("vm_page_cache_transfer: object %p has cached pages",
1254	    new_object));
1255	mtx_lock(&vm_page_queue_free_mtx);
1256	if ((m = orig_object->cache) != NULL) {
1257		/*
1258		 * Transfer all of the pages with offset greater than or
1259		 * equal to 'offidxstart' from the original object's
1260		 * cache to the new object's cache.
1261		 */
1262		m = vm_page_splay(offidxstart, m);
1263		if (m->pindex < offidxstart) {
1264			orig_object->cache = m;
1265			new_object->cache = m->right;
1266			m->right = NULL;
1267		} else {
1268			orig_object->cache = m->left;
1269			new_object->cache = m;
1270			m->left = NULL;
1271		}
1272		while ((m = new_object->cache) != NULL) {
1273			if ((m->pindex - offidxstart) >= new_object->size) {
1274				/*
1275				 * Return all of the cached pages with
1276				 * offset greater than or equal to the
1277				 * new object's size to the original
1278				 * object's cache.
1279				 */
1280				new_object->cache = m->left;
1281				m->left = orig_object->cache;
1282				orig_object->cache = m;
1283				break;
1284			}
1285			m_next = vm_page_splay(m->pindex, m->right);
1286			/* Update the page's object and offset. */
1287			m->object = new_object;
1288			m->pindex -= offidxstart;
1289			if (m_next == NULL)
1290				break;
1291			m->right = NULL;
1292			m_next->left = m;
1293			new_object->cache = m_next;
1294		}
1295		KASSERT(new_object->cache == NULL ||
1296		    new_object->type == OBJT_SWAP,
1297		    ("vm_page_cache_transfer: object %p's type is incompatible"
1298		    " with cached pages", new_object));
1299	}
1300	mtx_unlock(&vm_page_queue_free_mtx);
1301}
1302
1303/*
1304 *	Returns TRUE if a cached page is associated with the given object and
1305 *	offset, and FALSE otherwise.
1306 *
1307 *	The object must be locked.
1308 */
1309boolean_t
1310vm_page_is_cached(vm_object_t object, vm_pindex_t pindex)
1311{
1312	vm_page_t m;
1313
1314	/*
1315	 * Insertion into an object's collection of cached pages requires the
1316	 * object to be locked.  Therefore, if the object is locked and the
1317	 * object's collection is empty, there is no need to acquire the free
1318	 * page queues lock in order to prove that the specified page doesn't
1319	 * exist.
1320	 */
1321	VM_OBJECT_LOCK_ASSERT(object, MA_OWNED);
1322	if (__predict_true(object->cache == NULL))
1323		return (FALSE);
1324	mtx_lock(&vm_page_queue_free_mtx);
1325	m = vm_page_cache_lookup(object, pindex);
1326	mtx_unlock(&vm_page_queue_free_mtx);
1327	return (m != NULL);
1328}
1329
1330/*
1331 *	vm_page_alloc:
1332 *
1333 *	Allocate and return a page that is associated with the specified
1334 *	object and offset pair.  By default, this page has the flag VPO_BUSY
1335 *	set.
1336 *
1337 *	The caller must always specify an allocation class.
1338 *
1339 *	allocation classes:
1340 *	VM_ALLOC_NORMAL		normal process request
1341 *	VM_ALLOC_SYSTEM		system *really* needs a page
1342 *	VM_ALLOC_INTERRUPT	interrupt time request
1343 *
1344 *	optional allocation flags:
1345 *	VM_ALLOC_COUNT(number)	the number of additional pages that the caller
1346 *				intends to allocate
1347 *	VM_ALLOC_IFCACHED	return page only if it is cached
1348 *	VM_ALLOC_IFNOTCACHED	return NULL, do not reactivate if the page
1349 *				is cached
1350 *	VM_ALLOC_NOBUSY		do not set the flag VPO_BUSY on the page
1351 *	VM_ALLOC_NODUMP		do not include the page in a kernel core dump
1352 *	VM_ALLOC_NOOBJ		page is not associated with an object and
1353 *				should not have the flag VPO_BUSY set
1354 *	VM_ALLOC_WIRED		wire the allocated page
1355 *	VM_ALLOC_ZERO		prefer a zeroed page
1356 *
1357 *	This routine may not sleep.
1358 */
1359vm_page_t
1360vm_page_alloc(vm_object_t object, vm_pindex_t pindex, int req)
1361{
1362	struct vnode *vp = NULL;
1363	vm_object_t m_object;
1364	vm_page_t m;
1365	int flags, req_class;
1366
1367	KASSERT((object != NULL) == ((req & VM_ALLOC_NOOBJ) == 0),
1368	    ("vm_page_alloc: inconsistent object/req"));
1369	if (object != NULL)
1370		VM_OBJECT_LOCK_ASSERT(object, MA_OWNED);
1371
1372	req_class = req & VM_ALLOC_CLASS_MASK;
1373
1374	/*
1375	 * The page daemon is allowed to dig deeper into the free page list.
1376	 */
1377	if (curproc == pageproc && req_class != VM_ALLOC_INTERRUPT)
1378		req_class = VM_ALLOC_SYSTEM;
1379
1380	mtx_lock(&vm_page_queue_free_mtx);
1381	if (cnt.v_free_count + cnt.v_cache_count > cnt.v_free_reserved ||
1382	    (req_class == VM_ALLOC_SYSTEM &&
1383	    cnt.v_free_count + cnt.v_cache_count > cnt.v_interrupt_free_min) ||
1384	    (req_class == VM_ALLOC_INTERRUPT &&
1385	    cnt.v_free_count + cnt.v_cache_count > 0)) {
1386		/*
1387		 * Allocate from the free queue if the number of free pages
1388		 * exceeds the minimum for the request class.
1389		 */
1390		if (object != NULL &&
1391		    (m = vm_page_cache_lookup(object, pindex)) != NULL) {
1392			if ((req & VM_ALLOC_IFNOTCACHED) != 0) {
1393				mtx_unlock(&vm_page_queue_free_mtx);
1394				return (NULL);
1395			}
1396			if (vm_phys_unfree_page(m))
1397				vm_phys_set_pool(VM_FREEPOOL_DEFAULT, m, 0);
1398#if VM_NRESERVLEVEL > 0
1399			else if (!vm_reserv_reactivate_page(m))
1400#else
1401			else
1402#endif
1403				panic("vm_page_alloc: cache page %p is missing"
1404				    " from the free queue", m);
1405		} else if ((req & VM_ALLOC_IFCACHED) != 0) {
1406			mtx_unlock(&vm_page_queue_free_mtx);
1407			return (NULL);
1408#if VM_NRESERVLEVEL > 0
1409		} else if (object == NULL || object->type == OBJT_DEVICE ||
1410		    object->type == OBJT_SG ||
1411		    (object->flags & OBJ_COLORED) == 0 ||
1412		    (m = vm_reserv_alloc_page(object, pindex)) == NULL) {
1413#else
1414		} else {
1415#endif
1416			m = vm_phys_alloc_pages(object != NULL ?
1417			    VM_FREEPOOL_DEFAULT : VM_FREEPOOL_DIRECT, 0);
1418#if VM_NRESERVLEVEL > 0
1419			if (m == NULL && vm_reserv_reclaim_inactive()) {
1420				m = vm_phys_alloc_pages(object != NULL ?
1421				    VM_FREEPOOL_DEFAULT : VM_FREEPOOL_DIRECT,
1422				    0);
1423			}
1424#endif
1425		}
1426	} else {
1427		/*
1428		 * Not allocatable, give up.
1429		 */
1430		mtx_unlock(&vm_page_queue_free_mtx);
1431		atomic_add_int(&vm_pageout_deficit,
1432		    max((u_int)req >> VM_ALLOC_COUNT_SHIFT, 1));
1433		pagedaemon_wakeup();
1434		return (NULL);
1435	}
1436
1437	/*
1438	 *  At this point we had better have found a good page.
1439	 */
1440	KASSERT(m != NULL, ("vm_page_alloc: missing page"));
1441	KASSERT(m->queue == PQ_NONE,
1442	    ("vm_page_alloc: page %p has unexpected queue %d", m, m->queue));
1443	KASSERT(m->wire_count == 0, ("vm_page_alloc: page %p is wired", m));
1444	KASSERT(m->hold_count == 0, ("vm_page_alloc: page %p is held", m));
1445	KASSERT(m->busy == 0, ("vm_page_alloc: page %p is busy", m));
1446	KASSERT(m->dirty == 0, ("vm_page_alloc: page %p is dirty", m));
1447	KASSERT(pmap_page_get_memattr(m) == VM_MEMATTR_DEFAULT,
1448	    ("vm_page_alloc: page %p has unexpected memattr %d", m,
1449	    pmap_page_get_memattr(m)));
1450	if ((m->flags & PG_CACHED) != 0) {
1451		KASSERT((m->flags & PG_ZERO) == 0,
1452		    ("vm_page_alloc: cached page %p is PG_ZERO", m));
1453		KASSERT(m->valid != 0,
1454		    ("vm_page_alloc: cached page %p is invalid", m));
1455		if (m->object == object && m->pindex == pindex)
1456	  		cnt.v_reactivated++;
1457		else
1458			m->valid = 0;
1459		m_object = m->object;
1460		vm_page_cache_remove(m);
1461		if (m_object->type == OBJT_VNODE && m_object->cache == NULL)
1462			vp = m_object->handle;
1463	} else {
1464		KASSERT(VM_PAGE_IS_FREE(m),
1465		    ("vm_page_alloc: page %p is not free", m));
1466		KASSERT(m->valid == 0,
1467		    ("vm_page_alloc: free page %p is valid", m));
1468		cnt.v_free_count--;
1469	}
1470
1471	/*
1472	 * Only the PG_ZERO flag is inherited.  The PG_CACHED or PG_FREE flag
1473	 * must be cleared before the free page queues lock is released.
1474	 */
1475	flags = 0;
1476	if (req & VM_ALLOC_NODUMP)
1477		flags |= PG_NODUMP;
1478	if (m->flags & PG_ZERO) {
1479		vm_page_zero_count--;
1480		if (req & VM_ALLOC_ZERO)
1481			flags = PG_ZERO;
1482	}
1483	m->flags = flags;
1484	mtx_unlock(&vm_page_queue_free_mtx);
1485	m->aflags = 0;
1486	if (object == NULL || object->type == OBJT_PHYS)
1487		m->oflags = VPO_UNMANAGED;
1488	else
1489		m->oflags = 0;
1490	if ((req & (VM_ALLOC_NOBUSY | VM_ALLOC_NOOBJ)) == 0)
1491		m->oflags |= VPO_BUSY;
1492	if (req & VM_ALLOC_WIRED) {
1493		/*
1494		 * The page lock is not required for wiring a page until that
1495		 * page is inserted into the object.
1496		 */
1497		atomic_add_int(&cnt.v_wire_count, 1);
1498		m->wire_count = 1;
1499	}
1500	m->act_count = 0;
1501
1502	if (object != NULL) {
1503		/* Ignore device objects; the pager sets "memattr" for them. */
1504		if (object->memattr != VM_MEMATTR_DEFAULT &&
1505		    object->type != OBJT_DEVICE && object->type != OBJT_SG)
1506			pmap_page_set_memattr(m, object->memattr);
1507		vm_page_insert(m, object, pindex);
1508	} else
1509		m->pindex = pindex;
1510
1511	/*
1512	 * The following call to vdrop() must come after the above call
1513	 * to vm_page_insert() in case both affect the same object and
1514	 * vnode.  Otherwise, the affected vnode's hold count could
1515	 * temporarily become zero.
1516	 */
1517	if (vp != NULL)
1518		vdrop(vp);
1519
1520	/*
1521	 * Don't wake up too often - wake up the pageout daemon when
1522	 * we would be nearly out of memory.
1523	 */
1524	if (vm_paging_needed())
1525		pagedaemon_wakeup();
1526
1527	return (m);
1528}
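
/*
 * Illustrative sketch (an editorial addition, excluded from the build by
 * "#if 0"): a typical allocation of a busied page for a locked object,
 * retrying via VM_WAIT when memory is short.  VM_ALLOC_ZERO only expresses
 * a preference, so the page is zeroed explicitly when needed; "object" and
 * "pindex" are the caller's.
 */
#if 0
	vm_page_t m;

	while ((m = vm_page_alloc(object, pindex,
	    VM_ALLOC_NORMAL | VM_ALLOC_ZERO)) == NULL) {
		VM_OBJECT_UNLOCK(object);
		VM_WAIT;
		VM_OBJECT_LOCK(object);
	}
	if ((m->flags & PG_ZERO) == 0)
		pmap_zero_page(m);
#endif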
1529
1530/*
1531 *	vm_page_alloc_contig:
1532 *
1533 *	Allocate a contiguous set of physical pages of the given size "npages"
1534 *	from the free lists.  All of the physical pages must be at or above
1535 *	the given physical address "low" and below the given physical address
1536 *	"high".  The given value "alignment" determines the alignment of the
1537 *	first physical page in the set.  If the given value "boundary" is
1538 *	non-zero, then the set of physical pages cannot cross any physical
1539 *	address boundary that is a multiple of that value.  Both "alignment"
1540 *	and "boundary" must be a power of two.
1541 *
1542 *	If the specified memory attribute, "memattr", is VM_MEMATTR_DEFAULT,
1543 *	then the memory attribute setting for the physical pages is configured
1544 *	to the object's memory attribute setting.  Otherwise, the memory
1545 *	attribute setting for the physical pages is configured to "memattr",
1546 *	overriding the object's memory attribute setting.  However, if the
1547 *	object's memory attribute setting is not VM_MEMATTR_DEFAULT, then the
1548 *	memory attribute setting for the physical pages cannot be configured
1549 *	to VM_MEMATTR_DEFAULT.
1550 *
1551 *	The caller must always specify an allocation class.
1552 *
1553 *	allocation classes:
1554 *	VM_ALLOC_NORMAL		normal process request
1555 *	VM_ALLOC_SYSTEM		system *really* needs a page
1556 *	VM_ALLOC_INTERRUPT	interrupt time request
1557 *
1558 *	optional allocation flags:
1559 *	VM_ALLOC_NOBUSY		do not set the flag VPO_BUSY on the page
1560 *	VM_ALLOC_NOOBJ		page is not associated with an object and
1561 *				should not have the flag VPO_BUSY set
1562 *	VM_ALLOC_WIRED		wire the allocated page
1563 *	VM_ALLOC_ZERO		prefer a zeroed page
1564 *
1565 *	This routine may not sleep.
1566 */
1567vm_page_t
1568vm_page_alloc_contig(vm_object_t object, vm_pindex_t pindex, int req,
1569    u_long npages, vm_paddr_t low, vm_paddr_t high, u_long alignment,
1570    vm_paddr_t boundary, vm_memattr_t memattr)
1571{
1572	struct vnode *drop;
1573	vm_page_t deferred_vdrop_list, m, m_ret;
1574	u_int flags, oflags;
1575	int req_class;
1576
1577	KASSERT((object != NULL) == ((req & VM_ALLOC_NOOBJ) == 0),
1578	    ("vm_page_alloc_contig: inconsistent object/req"));
1579	if (object != NULL) {
1580		VM_OBJECT_LOCK_ASSERT(object, MA_OWNED);
1581		KASSERT(object->type == OBJT_PHYS,
1582		    ("vm_page_alloc_contig: object %p isn't OBJT_PHYS",
1583		    object));
1584	}
1585	KASSERT(npages > 0, ("vm_page_alloc_contig: npages is zero"));
1586	req_class = req & VM_ALLOC_CLASS_MASK;
1587
1588	/*
1589	 * The page daemon is allowed to dig deeper into the free page list.
1590	 */
1591	if (curproc == pageproc && req_class != VM_ALLOC_INTERRUPT)
1592		req_class = VM_ALLOC_SYSTEM;
1593
1594	deferred_vdrop_list = NULL;
1595	mtx_lock(&vm_page_queue_free_mtx);
1596	if (cnt.v_free_count + cnt.v_cache_count >= npages +
1597	    cnt.v_free_reserved || (req_class == VM_ALLOC_SYSTEM &&
1598	    cnt.v_free_count + cnt.v_cache_count >= npages +
1599	    cnt.v_interrupt_free_min) || (req_class == VM_ALLOC_INTERRUPT &&
1600	    cnt.v_free_count + cnt.v_cache_count >= npages)) {
1601#if VM_NRESERVLEVEL > 0
1602retry:
1603		if (object == NULL || (object->flags & OBJ_COLORED) == 0 ||
1604		    (m_ret = vm_reserv_alloc_contig(object, pindex, npages,
1605		    low, high, alignment, boundary)) == NULL)
1606#endif
1607			m_ret = vm_phys_alloc_contig(npages, low, high,
1608			    alignment, boundary);
1609	} else {
1610		mtx_unlock(&vm_page_queue_free_mtx);
1611		atomic_add_int(&vm_pageout_deficit, npages);
1612		pagedaemon_wakeup();
1613		return (NULL);
1614	}
1615	if (m_ret != NULL)
1616		for (m = m_ret; m < &m_ret[npages]; m++) {
1617			drop = vm_page_alloc_init(m);
1618			if (drop != NULL) {
1619				/*
1620				 * Enqueue the vnode for deferred vdrop().
1621				 *
1622				 * Once the pages are removed from the free
1623				 * page list, "pageq" can be safely abused to
1624				 * construct a short-lived list of vnodes.
1625				 */
1626				m->pageq.tqe_prev = (void *)drop;
1627				m->pageq.tqe_next = deferred_vdrop_list;
1628				deferred_vdrop_list = m;
1629			}
1630		}
1631	else {
1632#if VM_NRESERVLEVEL > 0
1633		if (vm_reserv_reclaim_contig(npages, low, high, alignment,
1634		    boundary))
1635			goto retry;
1636#endif
1637	}
1638	mtx_unlock(&vm_page_queue_free_mtx);
1639	if (m_ret == NULL)
1640		return (NULL);
1641
1642	/*
1643	 * Initialize the pages.  Only the PG_ZERO flag is inherited.
1644	 */
1645	flags = 0;
1646	if ((req & VM_ALLOC_ZERO) != 0)
1647		flags = PG_ZERO;
1648	if ((req & VM_ALLOC_NODUMP) != 0)
1649		flags |= PG_NODUMP;
1650	if ((req & VM_ALLOC_WIRED) != 0)
1651		atomic_add_int(&cnt.v_wire_count, npages);
1652	oflags = VPO_UNMANAGED;
1653	if (object != NULL) {
1654		if ((req & VM_ALLOC_NOBUSY) == 0)
1655			oflags |= VPO_BUSY;
1656		if (object->memattr != VM_MEMATTR_DEFAULT &&
1657		    memattr == VM_MEMATTR_DEFAULT)
1658			memattr = object->memattr;
1659	}
1660	for (m = m_ret; m < &m_ret[npages]; m++) {
1661		m->aflags = 0;
1662		m->flags = (m->flags | PG_NODUMP) & flags;
1663		if ((req & VM_ALLOC_WIRED) != 0)
1664			m->wire_count = 1;
1665		/* Unmanaged pages don't use "act_count". */
1666		m->oflags = oflags;
1667		if (memattr != VM_MEMATTR_DEFAULT)
1668			pmap_page_set_memattr(m, memattr);
1669		if (object != NULL)
1670			vm_page_insert(m, object, pindex);
1671		else
1672			m->pindex = pindex;
1673		pindex++;
1674	}
1675	while (deferred_vdrop_list != NULL) {
1676		vdrop((struct vnode *)deferred_vdrop_list->pageq.tqe_prev);
1677		deferred_vdrop_list = deferred_vdrop_list->pageq.tqe_next;
1678	}
1679	if (vm_paging_needed())
1680		pagedaemon_wakeup();
1681	return (m_ret);
1682}
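
/*
 * Illustrative sketch (an editorial addition, excluded from the build by
 * "#if 0"): allocating "npages" physically contiguous, wired pages below
 * 4GB that do not cross a 1MB boundary, without an owning object.  The
 * variable "npages" is a hypothetical caller-supplied count.
 */
#if 0
	vm_page_t m;

	m = vm_page_alloc_contig(NULL, 0,
	    VM_ALLOC_NORMAL | VM_ALLOC_NOOBJ | VM_ALLOC_WIRED | VM_ALLOC_ZERO,
	    npages, 0, 0xffffffff, PAGE_SIZE, 1024 * 1024,
	    VM_MEMATTR_DEFAULT);
	if (m == NULL) {
		/* Retry after VM_WAIT, or fall back to smaller requests. */
	}
#endif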
1683
1684/*
1685 * Initialize a page that has been freshly dequeued from a freelist.
1686 * The caller has to drop the vnode returned, if it is not NULL.
1687 *
1688 * This function may only be used to initialize unmanaged pages.
1689 *
1690 * To be called with vm_page_queue_free_mtx held.
1691 */
1692static struct vnode *
1693vm_page_alloc_init(vm_page_t m)
1694{
1695	struct vnode *drop;
1696	vm_object_t m_object;
1697
1698	KASSERT(m->queue == PQ_NONE,
1699	    ("vm_page_alloc_init: page %p has unexpected queue %d",
1700	    m, m->queue));
1701	KASSERT(m->wire_count == 0,
1702	    ("vm_page_alloc_init: page %p is wired", m));
1703	KASSERT(m->hold_count == 0,
1704	    ("vm_page_alloc_init: page %p is held", m));
1705	KASSERT(m->busy == 0,
1706	    ("vm_page_alloc_init: page %p is busy", m));
1707	KASSERT(m->dirty == 0,
1708	    ("vm_page_alloc_init: page %p is dirty", m));
1709	KASSERT(pmap_page_get_memattr(m) == VM_MEMATTR_DEFAULT,
1710	    ("vm_page_alloc_init: page %p has unexpected memattr %d",
1711	    m, pmap_page_get_memattr(m)));
1712	mtx_assert(&vm_page_queue_free_mtx, MA_OWNED);
1713	drop = NULL;
1714	if ((m->flags & PG_CACHED) != 0) {
1715		KASSERT((m->flags & PG_ZERO) == 0,
1716		    ("vm_page_alloc_init: cached page %p is PG_ZERO", m));
1717		m->valid = 0;
1718		m_object = m->object;
1719		vm_page_cache_remove(m);
1720		if (m_object->type == OBJT_VNODE && m_object->cache == NULL)
1721			drop = m_object->handle;
1722	} else {
1723		KASSERT(VM_PAGE_IS_FREE(m),
1724		    ("vm_page_alloc_init: page %p is not free", m));
1725		KASSERT(m->valid == 0,
1726		    ("vm_page_alloc_init: free page %p is valid", m));
1727		cnt.v_free_count--;
1728		if ((m->flags & PG_ZERO) != 0)
1729			vm_page_zero_count--;
1730	}
1731	/* Don't clear the PG_ZERO flag; we'll need it later. */
1732	m->flags &= PG_ZERO;
1733	return (drop);
1734}
1735
1736/*
1737 * 	vm_page_alloc_freelist:
1738 *
1739 *	Allocate a physical page from the specified free page list.
1740 *
1741 *	The caller must always specify an allocation class.
1742 *
1743 *	allocation classes:
1744 *	VM_ALLOC_NORMAL		normal process request
1745 *	VM_ALLOC_SYSTEM		system *really* needs a page
1746 *	VM_ALLOC_INTERRUPT	interrupt time request
1747 *
1748 *	optional allocation flags:
1749 *	VM_ALLOC_COUNT(number)	the number of additional pages that the caller
1750 *				intends to allocate
1751 *	VM_ALLOC_WIRED		wire the allocated page
1752 *	VM_ALLOC_ZERO		prefer a zeroed page
1753 *
1754 *	This routine may not sleep.
1755 */
1756vm_page_t
1757vm_page_alloc_freelist(int flind, int req)
1758{
1759	struct vnode *drop;
1760	vm_page_t m;
1761	u_int flags;
1762	int req_class;
1763
1764	req_class = req & VM_ALLOC_CLASS_MASK;
1765
1766	/*
1767	 * The page daemon is allowed to dig deeper into the free page list.
1768	 */
1769	if (curproc == pageproc && req_class != VM_ALLOC_INTERRUPT)
1770		req_class = VM_ALLOC_SYSTEM;
1771
1772	/*
1773	 * Do not allocate reserved pages unless the req has asked for it.
1774	 */
1775	mtx_lock(&vm_page_queue_free_mtx);
1776	if (cnt.v_free_count + cnt.v_cache_count > cnt.v_free_reserved ||
1777	    (req_class == VM_ALLOC_SYSTEM &&
1778	    cnt.v_free_count + cnt.v_cache_count > cnt.v_interrupt_free_min) ||
1779	    (req_class == VM_ALLOC_INTERRUPT &&
1780	    cnt.v_free_count + cnt.v_cache_count > 0))
1781		m = vm_phys_alloc_freelist_pages(flind, VM_FREEPOOL_DIRECT, 0);
1782	else {
1783		mtx_unlock(&vm_page_queue_free_mtx);
1784		atomic_add_int(&vm_pageout_deficit,
1785		    max((u_int)req >> VM_ALLOC_COUNT_SHIFT, 1));
1786		pagedaemon_wakeup();
1787		return (NULL);
1788	}
1789	if (m == NULL) {
1790		mtx_unlock(&vm_page_queue_free_mtx);
1791		return (NULL);
1792	}
1793	drop = vm_page_alloc_init(m);
1794	mtx_unlock(&vm_page_queue_free_mtx);
1795
1796	/*
1797	 * Initialize the page.  Only the PG_ZERO flag is inherited.
1798	 */
1799	m->aflags = 0;
1800	flags = 0;
1801	if ((req & VM_ALLOC_ZERO) != 0)
1802		flags = PG_ZERO;
1803	m->flags &= flags;
1804	if ((req & VM_ALLOC_WIRED) != 0) {
1805		/*
1806		 * The page lock is not required for wiring a page that does
1807		 * not belong to an object.
1808		 */
1809		atomic_add_int(&cnt.v_wire_count, 1);
1810		m->wire_count = 1;
1811	}
1812	/* Unmanaged pages don't use "act_count". */
1813	m->oflags = VPO_UNMANAGED;
1814	if (drop != NULL)
1815		vdrop(drop);
1816	if (vm_paging_needed())
1817		pagedaemon_wakeup();
1818	return (m);
1819}
1820
1821/*
1822 *	vm_wait:	(also see VM_WAIT macro)
1823 *
1824 *	Sleep until free pages are available for allocation.
1825 *	- Called in various places before memory allocations.
1826 */
1827void
1828vm_wait(void)
1829{
1830
1831	mtx_lock(&vm_page_queue_free_mtx);
1832	if (curproc == pageproc) {
1833		vm_pageout_pages_needed = 1;
1834		msleep(&vm_pageout_pages_needed, &vm_page_queue_free_mtx,
1835		    PDROP | PSWP, "VMWait", 0);
1836	} else {
1837		if (!vm_pages_needed) {
1838			vm_pages_needed = 1;
1839			wakeup(&vm_pages_needed);
1840		}
1841		msleep(&cnt.v_free_count, &vm_page_queue_free_mtx, PDROP | PVM,
1842		    "vmwait", 0);
1843	}
1844}
1845
1846/*
1847 *	vm_waitpfault:	(also see VM_WAITPFAULT macro)
1848 *
1849 *	Sleep until free pages are available for allocation.
1850 *	- Called only in vm_fault so that processes page faulting
1851 *	  can be easily tracked.
1852 *	- Sleeps at a lower priority than vm_wait() so that vm_wait()ing
1853 *	  processes will be able to grab memory first.  Do not change
1854 *	  this balance without careful testing first.
1855 */
1856void
1857vm_waitpfault(void)
1858{
1859
1860	mtx_lock(&vm_page_queue_free_mtx);
1861	if (!vm_pages_needed) {
1862		vm_pages_needed = 1;
1863		wakeup(&vm_pages_needed);
1864	}
1865	msleep(&cnt.v_free_count, &vm_page_queue_free_mtx, PDROP | PUSER,
1866	    "pfault", 0);
1867}
1868
1869/*
1870 *	vm_page_queue_remove:
1871 *
1872 *	Remove the given page from the specified queue.
1873 *
1874 *	The page and page queues must be locked.
1875 */
1876static __inline void
1877vm_page_queue_remove(int queue, vm_page_t m)
1878{
1879	struct vpgqueues *pq;
1880
1881	mtx_assert(&vm_page_queue_mtx, MA_OWNED);
1882	vm_page_lock_assert(m, MA_OWNED);
1883	pq = &vm_page_queues[queue];
1884	TAILQ_REMOVE(&pq->pl, m, pageq);
1885	(*pq->cnt)--;
1886}
1887
1888/*
1889 *	vm_pageq_remove:
1890 *
1891 *	Remove a page from its queue.
1892 *
1893 *	The given page must be locked.
1894 */
1895void
1896vm_pageq_remove(vm_page_t m)
1897{
1898	int queue;
1899
1900	vm_page_lock_assert(m, MA_OWNED);
1901	if ((queue = m->queue) != PQ_NONE) {
1902		vm_page_lock_queues();
1903		m->queue = PQ_NONE;
1904		vm_page_queue_remove(queue, m);
1905		vm_page_unlock_queues();
1906	}
1907}
1908
1909/*
1910 *	vm_page_enqueue:
1911 *
1912 *	Add the given page to the specified queue.
1913 *
1914 *	The page queues must be locked.
1915 */
1916static void
1917vm_page_enqueue(int queue, vm_page_t m)
1918{
1919	struct vpgqueues *vpq;
1920
1921	vpq = &vm_page_queues[queue];
1922	m->queue = queue;
1923	TAILQ_INSERT_TAIL(&vpq->pl, m, pageq);
1924	++*vpq->cnt;
1925}
1926
1927/*
1928 *	vm_page_activate:
1929 *
1930 *	Put the specified page on the active list (if appropriate).
1931 *	Ensure that act_count is at least ACT_INIT but do not otherwise
1932 *	mess with it.
1933 *
1934 *	The page must be locked.
1935 */
1936void
1937vm_page_activate(vm_page_t m)
1938{
1939	int queue;
1940
1941	vm_page_lock_assert(m, MA_OWNED);
1942	VM_OBJECT_LOCK_ASSERT(m->object, MA_OWNED);
1943	if ((queue = m->queue) != PQ_ACTIVE) {
1944		if (m->wire_count == 0 && (m->oflags & VPO_UNMANAGED) == 0) {
1945			if (m->act_count < ACT_INIT)
1946				m->act_count = ACT_INIT;
1947			vm_page_lock_queues();
1948			if (queue != PQ_NONE)
1949				vm_page_queue_remove(queue, m);
1950			vm_page_enqueue(PQ_ACTIVE, m);
1951			vm_page_unlock_queues();
1952		} else
1953			KASSERT(queue == PQ_NONE,
1954			    ("vm_page_activate: wired page %p is queued", m));
1955	} else {
1956		if (m->act_count < ACT_INIT)
1957			m->act_count = ACT_INIT;
1958	}
1959}
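
/*
 * Illustrative sketch (not part of this file): marking a resident page
 * recently used.  The helper name is hypothetical, and the caller is assumed
 * to hold the lock on the page's object, which vm_page_activate() asserts.
 */
#if 0
static void
example_mark_page_used(vm_page_t m)
{
	vm_page_lock(m);
	vm_page_activate(m);
	vm_page_unlock(m);
}
#endif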
1960
1961/*
1962 *	vm_page_free_wakeup:
1963 *
1964 *	Helper routine for vm_page_free_toq() and vm_page_cache().  This
1965 *	routine is called when a page has been added to the cache or free
1966 *	queues.
1967 *
1968 *	The page queues must be locked.
1969 */
1970static inline void
1971vm_page_free_wakeup(void)
1972{
1973
1974	mtx_assert(&vm_page_queue_free_mtx, MA_OWNED);
1975	/*
1976	 * If the pageout daemon needs pages, then tell it that there are
1977	 * some free.
1978	 */
1979	if (vm_pageout_pages_needed &&
1980	    cnt.v_cache_count + cnt.v_free_count >= cnt.v_pageout_free_min) {
1981		wakeup(&vm_pageout_pages_needed);
1982		vm_pageout_pages_needed = 0;
1983	}
1984	/*
1985	 * Wake up processes that are waiting on memory if we hit a
1986	 * high water mark, and wake up the scheduler process if we have
1987	 * lots of memory; it will swap in processes.
1988	 */
1989	if (vm_pages_needed && !vm_page_count_min()) {
1990		vm_pages_needed = 0;
1991		wakeup(&cnt.v_free_count);
1992	}
1993}
1994
1995/*
1996 *	vm_page_free_toq:
1997 *
1998 *	Returns the given page to the free list,
1999 *	disassociating it from any VM object.
2000 *
2001 *	The object must be locked.  The page must be locked if it is managed.
2002 */
2003void
2004vm_page_free_toq(vm_page_t m)
2005{
2006
2007	if ((m->oflags & VPO_UNMANAGED) == 0) {
2008		vm_page_lock_assert(m, MA_OWNED);
2009		KASSERT(!pmap_page_is_mapped(m),
2010		    ("vm_page_free_toq: freeing mapped page %p", m));
2011	}
2012	PCPU_INC(cnt.v_tfree);
2013
2014	if (VM_PAGE_IS_FREE(m))
2015		panic("vm_page_free: freeing free page %p", m);
2016	else if (m->busy != 0)
2017		panic("vm_page_free: freeing busy page %p", m);
2018
2019	/*
2020	 * Unqueue, then remove page.  Note that we cannot destroy
2021	 * the page here because we do not want to call the pager's
2022	 * callback routine until after we've put the page on the
2023	 * appropriate free queue.
2024	 */
2025	if ((m->oflags & VPO_UNMANAGED) == 0)
2026		vm_pageq_remove(m);
2027	vm_page_remove(m);
2028
2029	/*
2030	 * If the page is fictitious, simply return; fictitious pages are
2031	 * never placed in the free page queues.
2032	 */
2033	if ((m->flags & PG_FICTITIOUS) != 0) {
2034		return;
2035	}
2036
2037	m->valid = 0;
2038	vm_page_undirty(m);
2039
2040	if (m->wire_count != 0)
2041		panic("vm_page_free: freeing wired page %p", m);
2042	if (m->hold_count != 0) {
2043		m->flags &= ~PG_ZERO;
2044		KASSERT((m->flags & PG_UNHOLDFREE) == 0,
2045		    ("vm_page_free: freeing PG_UNHOLDFREE page %p", m));
2046		m->flags |= PG_UNHOLDFREE;
2047	} else {
2048		/*
2049		 * Restore the default memory attribute to the page.
2050		 */
2051		if (pmap_page_get_memattr(m) != VM_MEMATTR_DEFAULT)
2052			pmap_page_set_memattr(m, VM_MEMATTR_DEFAULT);
2053
2054		/*
2055		 * Insert the page into the physical memory allocator's
2056		 * cache/free page queues.
2057		 */
2058		mtx_lock(&vm_page_queue_free_mtx);
2059		m->flags |= PG_FREE;
2060		cnt.v_free_count++;
2061#if VM_NRESERVLEVEL > 0
2062		if (!vm_reserv_free_page(m))
2063#else
2064		if (TRUE)
2065#endif
2066			vm_phys_free_pages(m, 0);
2067		if ((m->flags & PG_ZERO) != 0)
2068			++vm_page_zero_count;
2069		else
2070			vm_page_zero_idle_wakeup();
2071		vm_page_free_wakeup();
2072		mtx_unlock(&vm_page_queue_free_mtx);
2073	}
2074}
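
/*
 * Illustrative sketch (not part of this file): returning a wired, unmanaged
 * page, such as one obtained from vm_page_alloc_freelist() above, to the
 * free lists.  The helper name is hypothetical.  Neither the page lock nor
 * an object lock is required for unmanaged pages.
 */
#if 0
static void
example_release_unmanaged_page(vm_page_t m)
{
	vm_page_unwire(m, 0);	/* Drop the last wiring. */
	vm_page_free(m);	/* Wrapper around vm_page_free_toq(). */
}
#endif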
2075
2076/*
2077 *	vm_page_wire:
2078 *
2079 *	Mark this page as wired down by yet
2080 *	another map, removing it from paging queues
2081 *	as necessary.
2082 *
2083 *	If the page is fictitious, then its wire count must remain one.
2084 *
2085 *	The page must be locked.
2086 */
2087void
2088vm_page_wire(vm_page_t m)
2089{
2090
2091	/*
2092	 * Only bump the wire statistics if the page is not already wired,
2093	 * and only unqueue the page if it is on some queue (if it is unmanaged
2094	 * it is already off the queues).
2095	 */
2096	vm_page_lock_assert(m, MA_OWNED);
2097	if ((m->flags & PG_FICTITIOUS) != 0) {
2098		KASSERT(m->wire_count == 1,
2099		    ("vm_page_wire: fictitious page %p's wire count isn't one",
2100		    m));
2101		return;
2102	}
2103	if (m->wire_count == 0) {
2104		if ((m->oflags & VPO_UNMANAGED) == 0)
2105			vm_pageq_remove(m);
2106		atomic_add_int(&cnt.v_wire_count, 1);
2107	}
2108	m->wire_count++;
2109	KASSERT(m->wire_count != 0, ("vm_page_wire: wire_count overflow m=%p", m));
2110}
2111
2112/*
2113 * vm_page_unwire:
2114 *
2115 * Release one wiring of the specified page, potentially enabling it to be
2116 * paged again.  If paging is enabled, then the value of the parameter
2117 * "activate" determines to which queue the page is added.  If "activate" is
2118 * non-zero, then the page is added to the active queue.  Otherwise, it is
2119 * added to the inactive queue.
2120 *
2121 * However, unless the page belongs to an object, it is not enqueued because
2122 * it cannot be paged out.
2123 *
2124 * If a page is fictitious, then its wire count must always be one.
2125 *
2126 * A managed page must be locked.
2127 */
2128void
2129vm_page_unwire(vm_page_t m, int activate)
2130{
2131
2132	if ((m->oflags & VPO_UNMANAGED) == 0)
2133		vm_page_lock_assert(m, MA_OWNED);
2134	if ((m->flags & PG_FICTITIOUS) != 0) {
2135		KASSERT(m->wire_count == 1,
2136	    ("vm_page_unwire: fictitious page %p's wire count isn't one", m));
2137		return;
2138	}
2139	if (m->wire_count > 0) {
2140		m->wire_count--;
2141		if (m->wire_count == 0) {
2142			atomic_subtract_int(&cnt.v_wire_count, 1);
2143			if ((m->oflags & VPO_UNMANAGED) != 0 ||
2144			    m->object == NULL)
2145				return;
2146			if (!activate)
2147				m->flags &= ~PG_WINATCFLS;
2148			vm_page_lock_queues();
2149			vm_page_enqueue(activate ? PQ_ACTIVE : PQ_INACTIVE, m);
2150			vm_page_unlock_queues();
2151		}
2152	} else
2153		panic("vm_page_unwire: page %p's wire count is zero", m);
2154}
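
/*
 * Illustrative sketch (not part of this file): pinning a managed page across
 * an operation and then releasing it to the inactive queue.  The helper name
 * is hypothetical; only the page lock is needed here.
 */
#if 0
static void
example_pin_and_release(vm_page_t m)
{
	vm_page_lock(m);
	vm_page_wire(m);
	vm_page_unlock(m);

	/* ... operate on the page while it cannot be paged out ... */

	vm_page_lock(m);
	vm_page_unwire(m, 0);	/* 0: requeue on the inactive queue. */
	vm_page_unlock(m);
}
#endif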
2155
2156/*
2157 * Move the specified page to the inactive queue.
2158 *
2159 * Many pages placed on the inactive queue should actually go
2160 * into the cache, but it is difficult to figure out which.  What
2161 * we do instead, if the inactive target is well met, is to put
2162 * clean pages at the head of the inactive queue instead of the tail.
2163 * This will cause them to be moved to the cache more quickly and
2164 * if not actively re-referenced, reclaimed more quickly.  If we just
2165 * stick these pages at the end of the inactive queue, heavy filesystem
2166 * meta-data accesses can cause an unnecessary paging load on memory bound
2167 * processes.  This optimization causes one-time-use metadata to be
2168 * reused more quickly.
2169 *
2170 * Normally athead is 0 resulting in LRU operation.  athead is set
2171 * to 1 if we want this page to be 'as if it were placed in the cache',
2172 * except without unmapping it from the process address space.
2173 *
2174 * The page must be locked.
2175 */
2176static inline void
2177_vm_page_deactivate(vm_page_t m, int athead)
2178{
2179	int queue;
2180
2181	vm_page_lock_assert(m, MA_OWNED);
2182
2183	/*
2184	 * Ignore if already inactive.
2185	 */
2186	if ((queue = m->queue) == PQ_INACTIVE)
2187		return;
2188	if (m->wire_count == 0 && (m->oflags & VPO_UNMANAGED) == 0) {
2189		m->flags &= ~PG_WINATCFLS;
2190		vm_page_lock_queues();
2191		if (queue != PQ_NONE)
2192			vm_page_queue_remove(queue, m);
2193		if (athead)
2194			TAILQ_INSERT_HEAD(&vm_page_queues[PQ_INACTIVE].pl, m,
2195			    pageq);
2196		else
2197			TAILQ_INSERT_TAIL(&vm_page_queues[PQ_INACTIVE].pl, m,
2198			    pageq);
2199		m->queue = PQ_INACTIVE;
2200		cnt.v_inactive_count++;
2201		vm_page_unlock_queues();
2202	}
2203}
2204
2205/*
2206 * Move the specified page to the inactive queue.
2207 *
2208 * The page must be locked.
2209 */
2210void
2211vm_page_deactivate(vm_page_t m)
2212{
2213
2214	_vm_page_deactivate(m, 0);
2215}
2216
2217/*
2218 * vm_page_try_to_cache:
2219 *
2220 * Returns 0 on failure, 1 on success
2221 */
2222int
2223vm_page_try_to_cache(vm_page_t m)
2224{
2225
2226	vm_page_lock_assert(m, MA_OWNED);
2227	VM_OBJECT_LOCK_ASSERT(m->object, MA_OWNED);
2228	if (m->dirty || m->hold_count || m->busy || m->wire_count ||
2229	    (m->oflags & (VPO_BUSY | VPO_UNMANAGED)) != 0)
2230		return (0);
2231	pmap_remove_all(m);
2232	if (m->dirty)
2233		return (0);
2234	vm_page_cache(m);
2235	return (1);
2236}
2237
2238/*
2239 * vm_page_try_to_free()
2240 *
2241 *	Attempt to free the page.  If we cannot free it, we do nothing.
2242 *	1 is returned on success, 0 on failure.
2243 */
2244int
2245vm_page_try_to_free(vm_page_t m)
2246{
2247
2248	vm_page_lock_assert(m, MA_OWNED);
2249	if (m->object != NULL)
2250		VM_OBJECT_LOCK_ASSERT(m->object, MA_OWNED);
2251	if (m->dirty || m->hold_count || m->busy || m->wire_count ||
2252	    (m->oflags & (VPO_BUSY | VPO_UNMANAGED)) != 0)
2253		return (0);
2254	pmap_remove_all(m);
2255	if (m->dirty)
2256		return (0);
2257	vm_page_free(m);
2258	return (1);
2259}
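
/*
 * Illustrative sketch (not part of this file): opportunistically reclaiming
 * a page while tolerating failure.  The helper name is hypothetical, and the
 * caller is assumed to hold the lock on the page's object.
 */
#if 0
static void
example_try_reclaim(vm_page_t m)
{
	vm_page_lock(m);
	if (vm_page_try_to_free(m) == 0)
		vm_page_deactivate(m);	/* Could not free it; age it instead. */
	vm_page_unlock(m);
}
#endif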
2260
2261/*
2262 * vm_page_cache
2263 *
2264 * Put the specified page onto the page cache queue (if appropriate).
2265 *
2266 * The object and page must be locked.
2267 */
2268void
2269vm_page_cache(vm_page_t m)
2270{
2271	vm_object_t object;
2272	vm_page_t next, prev, root;
2273
2274	vm_page_lock_assert(m, MA_OWNED);
2275	object = m->object;
2276	VM_OBJECT_LOCK_ASSERT(object, MA_OWNED);
2277	if ((m->oflags & (VPO_UNMANAGED | VPO_BUSY)) || m->busy ||
2278	    m->hold_count || m->wire_count)
2279		panic("vm_page_cache: attempting to cache busy page");
2280	pmap_remove_all(m);
2281	if (m->dirty != 0)
2282		panic("vm_page_cache: page %p is dirty", m);
2283	if (m->valid == 0 || object->type == OBJT_DEFAULT ||
2284	    (object->type == OBJT_SWAP &&
2285	    !vm_pager_has_page(object, m->pindex, NULL, NULL))) {
2286		/*
2287		 * Hypothesis: A cache-eligible page belonging to a
2288		 * default object or swap object but without a backing
2289		 * store must be zero filled.
2290		 */
2291		vm_page_free(m);
2292		return;
2293	}
2294	KASSERT((m->flags & PG_CACHED) == 0,
2295	    ("vm_page_cache: page %p is already cached", m));
2296	PCPU_INC(cnt.v_tcached);
2297
2298	/*
2299	 * Remove the page from the paging queues.
2300	 */
2301	vm_pageq_remove(m);
2302
2303	/*
2304	 * Remove the page from the object's collection of resident
2305	 * pages.
2306	 */
2307	if ((next = TAILQ_NEXT(m, listq)) != NULL && next->left == m) {
2308		/*
2309		 * Since the page's successor in the list is also its parent
2310		 * in the tree, its right subtree must be empty.
2311		 */
2312		next->left = m->left;
2313		KASSERT(m->right == NULL,
2314		    ("vm_page_cache: page %p has right child", m));
2315	} else if ((prev = TAILQ_PREV(m, pglist, listq)) != NULL &&
2316	    prev->right == m) {
2317		/*
2318		 * Since the page's predecessor in the list is also its parent
2319		 * in the tree, its left subtree must be empty.
2320		 */
2321		KASSERT(m->left == NULL,
2322		    ("vm_page_cache: page %p has left child", m));
2323		prev->right = m->right;
2324	} else {
2325		if (m != object->root)
2326			vm_page_splay(m->pindex, object->root);
2327		if (m->left == NULL)
2328			root = m->right;
2329		else if (m->right == NULL)
2330			root = m->left;
2331		else {
2332			/*
2333			 * Move the page's successor to the root, because
2334			 * pages are usually removed in ascending order.
2335			 */
2336			if (m->right != next)
2337				vm_page_splay(m->pindex, m->right);
2338			next->left = m->left;
2339			root = next;
2340		}
2341		object->root = root;
2342	}
2343	TAILQ_REMOVE(&object->memq, m, listq);
2344	object->resident_page_count--;
2345
2346	/*
2347	 * Restore the default memory attribute to the page.
2348	 */
2349	if (pmap_page_get_memattr(m) != VM_MEMATTR_DEFAULT)
2350		pmap_page_set_memattr(m, VM_MEMATTR_DEFAULT);
2351
2352	/*
2353	 * Insert the page into the object's collection of cached pages
2354	 * and the physical memory allocator's cache/free page queues.
2355	 */
2356	m->flags &= ~PG_ZERO;
2357	mtx_lock(&vm_page_queue_free_mtx);
2358	m->flags |= PG_CACHED;
2359	cnt.v_cache_count++;
2360	root = object->cache;
2361	if (root == NULL) {
2362		m->left = NULL;
2363		m->right = NULL;
2364	} else {
2365		root = vm_page_splay(m->pindex, root);
2366		if (m->pindex < root->pindex) {
2367			m->left = root->left;
2368			m->right = root;
2369			root->left = NULL;
2370		} else if (__predict_false(m->pindex == root->pindex))
2371			panic("vm_page_cache: offset already cached");
2372		else {
2373			m->right = root->right;
2374			m->left = root;
2375			root->right = NULL;
2376		}
2377	}
2378	object->cache = m;
2379#if VM_NRESERVLEVEL > 0
2380	if (!vm_reserv_free_page(m)) {
2381#else
2382	if (TRUE) {
2383#endif
2384		vm_phys_set_pool(VM_FREEPOOL_CACHE, m, 0);
2385		vm_phys_free_pages(m, 0);
2386	}
2387	vm_page_free_wakeup();
2388	mtx_unlock(&vm_page_queue_free_mtx);
2389
2390	/*
2391	 * Increment the vnode's hold count if this is the object's only
2392	 * cached page.  Decrement the vnode's hold count if this was
2393	 * the object's only resident page.
2394	 */
2395	if (object->type == OBJT_VNODE) {
2396		if (root == NULL && object->resident_page_count != 0)
2397			vhold(object->handle);
2398		else if (root != NULL && object->resident_page_count == 0)
2399			vdrop(object->handle);
2400	}
2401}
2402
2403/*
2404 * vm_page_dontneed
2405 *
2406 *	Cache, deactivate, or do nothing as appropriate.  This routine
2407 *	is typically used by madvise() MADV_DONTNEED.
2408 *
2409 *	Generally speaking we want to move the page into the cache so
2410 *	it gets reused quickly.  However, this can result in a silly syndrome
2411 *	due to the page recycling too quickly.  Small objects will not be
2412 *	fully cached.  On the other hand, if we move the page to the inactive
2413 *	queue we wind up with a problem whereby very large objects
2414 *	unnecessarily blow away our inactive and cache queues.
2415 *
2416 *	The solution is to move the pages based on a fixed weighting.  We
2417 *	either leave them alone, deactivate them, or move them to the cache,
2418 *	where moving them to the cache has the highest weighting.
2419 *	By forcing some pages into other queues we eventually force the
2420 *	system to balance the queues, potentially recovering other unrelated
2421 *	space from active.  The idea is to not force this to happen too
2422 *	often.
2423 *
2424 *	The object and page must be locked.
2425 */
2426void
2427vm_page_dontneed(vm_page_t m)
2428{
2429	int dnw;
2430	int head;
2431
2432	vm_page_lock_assert(m, MA_OWNED);
2433	VM_OBJECT_LOCK_ASSERT(m->object, MA_OWNED);
2434	dnw = PCPU_GET(dnweight);
2435	PCPU_INC(dnweight);
2436
2437	/*
2438	 * Occasionally leave the page alone.
2439	 */
2440	if ((dnw & 0x01F0) == 0 || m->queue == PQ_INACTIVE) {
2441		if (m->act_count >= ACT_INIT)
2442			--m->act_count;
2443		return;
2444	}
2445
2446	/*
2447	 * Clear any references to the page.  Otherwise, the page daemon will
2448	 * immediately reactivate the page.
2449	 *
2450	 * Perform the pmap_clear_reference() first.  Otherwise, a concurrent
2451	 * pmap operation, such as pmap_remove(), could clear a reference in
2452	 * the pmap and set PGA_REFERENCED on the page before the
2453	 * pmap_clear_reference() had completed.  Consequently, the page would
2454	 * appear referenced based upon an old reference that occurred before
2455	 * this function ran.
2456	 */
2457	pmap_clear_reference(m);
2458	vm_page_aflag_clear(m, PGA_REFERENCED);
2459
2460	if (m->dirty == 0 && pmap_is_modified(m))
2461		vm_page_dirty(m);
2462
2463	if (m->dirty || (dnw & 0x0070) == 0) {
2464		/*
2465		 * Deactivate the page 3 times out of 32.
2466		 */
2467		head = 0;
2468	} else {
2469		/*
2470		 * Cache the page 28 times out of every 32.  Note that
2471		 * the page is deactivated instead of cached, but placed
2472		 * at the head of the queue instead of the tail.
2473		 */
2474		head = 1;
2475	}
2476	_vm_page_deactivate(m, head);
2477}
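
/*
 * Illustrative sketch (not part of this file): advising a range of resident
 * pages that they are unlikely to be needed again, in the spirit of what
 * vm_object_madvise() does for MADV_DONTNEED.  The helper name is
 * hypothetical, and a real caller would also skip busy, wired, and unmanaged
 * pages.
 */
#if 0
static void
example_dontneed_range(vm_object_t object, vm_pindex_t start, vm_pindex_t end)
{
	vm_page_t m;
	vm_pindex_t pindex;

	VM_OBJECT_LOCK(object);
	for (pindex = start; pindex < end; pindex++) {
		if ((m = vm_page_lookup(object, pindex)) == NULL)
			continue;
		vm_page_lock(m);
		vm_page_dontneed(m);
		vm_page_unlock(m);
	}
	VM_OBJECT_UNLOCK(object);
}
#endif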
2478
2479/*
2480 * Grab a page, waiting until we are woken up due to the page
2481 * changing state.  We keep on waiting as long as the page continues
2482 * to be in the object.  If the page doesn't exist, first allocate it
2483 * and then conditionally zero it.
2484 *
2485 * The caller must always specify the VM_ALLOC_RETRY flag.  This is intended
2486 * to facilitate its eventual removal.
2487 *
2488 * This routine may sleep.
2489 *
2490 * The object must be locked on entry.  The lock will, however, be released
2491 * and reacquired if the routine sleeps.
2492 */
2493vm_page_t
2494vm_page_grab(vm_object_t object, vm_pindex_t pindex, int allocflags)
2495{
2496	vm_page_t m;
2497
2498	VM_OBJECT_LOCK_ASSERT(object, MA_OWNED);
2499	KASSERT((allocflags & VM_ALLOC_RETRY) != 0,
2500	    ("vm_page_grab: VM_ALLOC_RETRY is required"));
2501retrylookup:
2502	if ((m = vm_page_lookup(object, pindex)) != NULL) {
2503		if ((m->oflags & VPO_BUSY) != 0 ||
2504		    ((allocflags & VM_ALLOC_IGN_SBUSY) == 0 && m->busy != 0)) {
2505			/*
2506			 * Reference the page before unlocking and
2507			 * sleeping so that the page daemon is less
2508			 * likely to reclaim it.
2509			 */
2510			vm_page_aflag_set(m, PGA_REFERENCED);
2511			vm_page_sleep(m, "pgrbwt");
2512			goto retrylookup;
2513		} else {
2514			if ((allocflags & VM_ALLOC_WIRED) != 0) {
2515				vm_page_lock(m);
2516				vm_page_wire(m);
2517				vm_page_unlock(m);
2518			}
2519			if ((allocflags & VM_ALLOC_NOBUSY) == 0)
2520				vm_page_busy(m);
2521			return (m);
2522		}
2523	}
2524	m = vm_page_alloc(object, pindex, allocflags & ~(VM_ALLOC_RETRY |
2525	    VM_ALLOC_IGN_SBUSY));
2526	if (m == NULL) {
2527		VM_OBJECT_UNLOCK(object);
2528		VM_WAIT;
2529		VM_OBJECT_LOCK(object);
2530		goto retrylookup;
2531	} else if (m->valid != 0)
2532		return (m);
2533	if (allocflags & VM_ALLOC_ZERO && (m->flags & PG_ZERO) == 0)
2534		pmap_zero_page(m);
2535	return (m);
2536}
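
/*
 * Illustrative sketch (not part of this file): grabbing a page, allocating
 * it if necessary, and making it fully valid before unbusying it.  The
 * helper name is hypothetical.
 */
#if 0
static vm_page_t
example_grab_valid(vm_object_t object, vm_pindex_t pindex)
{
	vm_page_t m;

	VM_OBJECT_LOCK(object);
	m = vm_page_grab(object, pindex, VM_ALLOC_NORMAL | VM_ALLOC_RETRY);
	if (m->valid != VM_PAGE_BITS_ALL)
		/* Zero whatever is still invalid and mark the page valid. */
		vm_page_zero_invalid(m, TRUE);
	vm_page_wakeup(m);	/* Clear VPO_BUSY. */
	VM_OBJECT_UNLOCK(object);
	return (m);
}
#endif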
2537
2538/*
2539 * Mapping function for valid or dirty bits in a page.
2540 *
2541 * Inputs are required to range within a page.
2542 */
2543vm_page_bits_t
2544vm_page_bits(int base, int size)
2545{
2546	int first_bit;
2547	int last_bit;
2548
2549	KASSERT(
2550	    base + size <= PAGE_SIZE,
2551	    ("vm_page_bits: illegal base/size %d/%d", base, size)
2552	);
2553
2554	if (size == 0)		/* handle degenerate case */
2555		return (0);
2556
2557	first_bit = base >> DEV_BSHIFT;
2558	last_bit = (base + size - 1) >> DEV_BSHIFT;
2559
2560	return (((vm_page_bits_t)2 << last_bit) -
2561	    ((vm_page_bits_t)1 << first_bit));
2562}
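
/*
 * Worked example (illustrative): assuming the common DEV_BSIZE of 512
 * (DEV_BSHIFT == 9), vm_page_bits(512, 1024) computes first_bit = 512 >> 9
 * = 1 and last_bit = (512 + 1024 - 1) >> 9 = 2, so the result is
 * (2 << 2) - (1 << 1) = 8 - 2 = 0x6, i.e. the valid/dirty bits covering the
 * second and third DEV_BSIZE chunks of the page.
 */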
2563
2564/*
2565 *	vm_page_set_valid_range:
2566 *
2567 *	Sets portions of a page valid.  The arguments are expected
2568 *	to be DEV_BSIZE aligned but if they aren't the bitmap is inclusive
2569 *	of any partial chunks touched by the range.  The invalid portion of
2570 *	such chunks will be zeroed.
2571 *
2572 *	(base + size) must be less than or equal to PAGE_SIZE.
2573 */
2574void
2575vm_page_set_valid_range(vm_page_t m, int base, int size)
2576{
2577	int endoff, frag;
2578
2579	VM_OBJECT_LOCK_ASSERT(m->object, MA_OWNED);
2580	if (size == 0)	/* handle degenerate case */
2581		return;
2582
2583	/*
2584	 * If the base is not DEV_BSIZE aligned and the valid
2585	 * bit is clear, we have to zero out a portion of the
2586	 * first block.
2587	 */
2588	if ((frag = base & ~(DEV_BSIZE - 1)) != base &&
2589	    (m->valid & (1 << (base >> DEV_BSHIFT))) == 0)
2590		pmap_zero_page_area(m, frag, base - frag);
2591
2592	/*
2593	 * If the ending offset is not DEV_BSIZE aligned and the
2594	 * valid bit is clear, we have to zero out a portion of
2595	 * the last block.
2596	 */
2597	endoff = base + size;
2598	if ((frag = endoff & ~(DEV_BSIZE - 1)) != endoff &&
2599	    (m->valid & (1 << (endoff >> DEV_BSHIFT))) == 0)
2600		pmap_zero_page_area(m, endoff,
2601		    DEV_BSIZE - (endoff & (DEV_BSIZE - 1)));
2602
2603	/*
2604	 * Assert that no previously invalid block that is now being validated
2605	 * is already dirty.
2606	 */
2607	KASSERT((~m->valid & vm_page_bits(base, size) & m->dirty) == 0,
2608	    ("vm_page_set_valid_range: page %p is dirty", m));
2609
2610	/*
2611	 * Set valid bits inclusive of any overlap.
2612	 */
2613	m->valid |= vm_page_bits(base, size);
2614}
2615
2616/*
2617 * Clear the given bits from the specified page's dirty field.
2618 */
2619static __inline void
2620vm_page_clear_dirty_mask(vm_page_t m, vm_page_bits_t pagebits)
2621{
2622	uintptr_t addr;
2623#if PAGE_SIZE < 16384
2624	int shift;
2625#endif
2626
2627	/*
2628	 * If the object is locked and the page is neither VPO_BUSY nor
2629	 * write mapped, then the page's dirty field cannot possibly be
2630	 * set by a concurrent pmap operation.
2631	 */
2632	VM_OBJECT_LOCK_ASSERT(m->object, MA_OWNED);
2633	if ((m->oflags & VPO_BUSY) == 0 && !pmap_page_is_write_mapped(m))
2634		m->dirty &= ~pagebits;
2635	else {
2636		/*
2637		 * The pmap layer can call vm_page_dirty() without
2638		 * holding a distinguished lock.  The combination of
2639		 * the object's lock and an atomic operation suffice
2640		 * to guarantee consistency of the page dirty field.
2641		 *
2642		 * In the PAGE_SIZE == 32768 case, the compiler already
2643		 * properly aligns the dirty field, so no forcible
2644		 * alignment is needed.  Only require the existence of
2645		 * atomic_clear_64() when the page size is 32768.
2646		 */
2647		addr = (uintptr_t)&m->dirty;
2648#if PAGE_SIZE == 32768
2649		atomic_clear_64((uint64_t *)addr, pagebits);
2650#elif PAGE_SIZE == 16384
2651		atomic_clear_32((uint32_t *)addr, pagebits);
2652#else		/* PAGE_SIZE <= 8192 */
2653		/*
2654		 * Use a trick to perform a 32-bit atomic on the
2655		 * containing aligned word, to not depend on the existence
2656		 * of atomic_clear_{8, 16}.
2657		 */
2658		shift = addr & (sizeof(uint32_t) - 1);
2659#if BYTE_ORDER == BIG_ENDIAN
2660		shift = (sizeof(uint32_t) - sizeof(m->dirty) - shift) * NBBY;
2661#else
2662		shift *= NBBY;
2663#endif
2664		addr &= ~(sizeof(uint32_t) - 1);
2665		atomic_clear_32((uint32_t *)addr, pagebits << shift);
2666#endif		/* PAGE_SIZE */
2667	}
2668}
2669
2670/*
2671 *	vm_page_set_validclean:
2672 *
2673 *	Sets portions of a page valid and clean.  The arguments are expected
2674 *	to be DEV_BSIZE aligned but if they aren't the bitmap is inclusive
2675 *	of any partial chunks touched by the range.  The invalid portion of
2676 *	such chunks will be zero'd.
2677 *
2678 *	(base + size) must be less than or equal to PAGE_SIZE.
2679 */
2680void
2681vm_page_set_validclean(vm_page_t m, int base, int size)
2682{
2683	vm_page_bits_t oldvalid, pagebits;
2684	int endoff, frag;
2685
2686	VM_OBJECT_LOCK_ASSERT(m->object, MA_OWNED);
2687	if (size == 0)	/* handle degenerate case */
2688		return;
2689
2690	/*
2691	 * If the base is not DEV_BSIZE aligned and the valid
2692	 * bit is clear, we have to zero out a portion of the
2693	 * first block.
2694	 */
2695	if ((frag = base & ~(DEV_BSIZE - 1)) != base &&
2696	    (m->valid & ((vm_page_bits_t)1 << (base >> DEV_BSHIFT))) == 0)
2697		pmap_zero_page_area(m, frag, base - frag);
2698
2699	/*
2700	 * If the ending offset is not DEV_BSIZE aligned and the
2701	 * valid bit is clear, we have to zero out a portion of
2702	 * the last block.
2703	 */
2704	endoff = base + size;
2705	if ((frag = endoff & ~(DEV_BSIZE - 1)) != endoff &&
2706	    (m->valid & ((vm_page_bits_t)1 << (endoff >> DEV_BSHIFT))) == 0)
2707		pmap_zero_page_area(m, endoff,
2708		    DEV_BSIZE - (endoff & (DEV_BSIZE - 1)));
2709
2710	/*
2711	 * Set valid, clear dirty bits.  If validating the entire
2712	 * page we can safely clear the pmap modify bit.  We also
2713	 * use this opportunity to clear the VPO_NOSYNC flag.  If a process
2714	 * takes a write fault on a MAP_NOSYNC memory area the flag will
2715	 * be set again.
2716	 *
2717	 * We set valid bits inclusive of any overlap, but we can only
2718	 * clear dirty bits for DEV_BSIZE chunks that are fully within
2719	 * the range.
2720	 */
2721	oldvalid = m->valid;
2722	pagebits = vm_page_bits(base, size);
2723	m->valid |= pagebits;
2724#if 0	/* NOT YET */
2725	if ((frag = base & (DEV_BSIZE - 1)) != 0) {
2726		frag = DEV_BSIZE - frag;
2727		base += frag;
2728		size -= frag;
2729		if (size < 0)
2730			size = 0;
2731	}
2732	pagebits = vm_page_bits(base, size & (DEV_BSIZE - 1));
2733#endif
2734	if (base == 0 && size == PAGE_SIZE) {
2735		/*
2736		 * The page can only be modified within the pmap if it is
2737		 * mapped, and it can only be mapped if it was previously
2738		 * fully valid.
2739		 */
2740		if (oldvalid == VM_PAGE_BITS_ALL)
2741			/*
2742			 * Perform the pmap_clear_modify() first.  Otherwise,
2743			 * a concurrent pmap operation, such as
2744			 * pmap_protect(), could clear a modification in the
2745			 * pmap and set the dirty field on the page before
2746			 * pmap_clear_modify() had begun and after the dirty
2747			 * field was cleared here.
2748			 */
2749			pmap_clear_modify(m);
2750		m->dirty = 0;
2751		m->oflags &= ~VPO_NOSYNC;
2752	} else if (oldvalid != VM_PAGE_BITS_ALL)
2753		m->dirty &= ~pagebits;
2754	else
2755		vm_page_clear_dirty_mask(m, pagebits);
2756}
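
/*
 * Illustrative sketch (not part of this file): a pager that has read "bytes"
 * bytes of new data into the start of a busied page marks just that portion
 * valid and clean, then zeroes and validates the remainder.  The helper name
 * is hypothetical, and the caller is assumed to hold the lock on the page's
 * object.
 */
#if 0
static void
example_finish_partial_read(vm_page_t m, int bytes)
{
	vm_page_set_validclean(m, 0, bytes);
	if (m->valid != VM_PAGE_BITS_ALL)
		vm_page_zero_invalid(m, TRUE);
}
#endif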
2757
2758void
2759vm_page_clear_dirty(vm_page_t m, int base, int size)
2760{
2761
2762	vm_page_clear_dirty_mask(m, vm_page_bits(base, size));
2763}
2764
2765/*
2766 *	vm_page_set_invalid:
2767 *
2768 *	Invalidates DEV_BSIZE'd chunks within a page.  Both the
2769 *	valid and dirty bits for the affected areas are cleared.
2770 */
2771void
2772vm_page_set_invalid(vm_page_t m, int base, int size)
2773{
2774	vm_page_bits_t bits;
2775
2776	VM_OBJECT_LOCK_ASSERT(m->object, MA_OWNED);
2777	KASSERT((m->oflags & VPO_BUSY) == 0,
2778	    ("vm_page_set_invalid: page %p is busy", m));
2779	bits = vm_page_bits(base, size);
2780	if (m->valid == VM_PAGE_BITS_ALL && bits != 0)
2781		pmap_remove_all(m);
2782	KASSERT(!pmap_page_is_mapped(m),
2783	    ("vm_page_set_invalid: page %p is mapped", m));
2784	m->valid &= ~bits;
2785	m->dirty &= ~bits;
2786}
2787
2788/*
2789 * vm_page_zero_invalid()
2790 *
2791 *	The kernel assumes that the invalid portions of a page contain
2792 *	garbage, but such pages can be mapped into memory by user code.
2793 *	When this occurs, we must zero out the non-valid portions of the
2794 *	page so user code sees what it expects.
2795 *
2796 *	Pages are most often semi-valid when the end of a file is mapped
2797 *	into memory and the file's size is not page aligned.
2798 */
2799void
2800vm_page_zero_invalid(vm_page_t m, boolean_t setvalid)
2801{
2802	int b;
2803	int i;
2804
2805	VM_OBJECT_LOCK_ASSERT(m->object, MA_OWNED);
2806	/*
2807	 * Scan the valid bits looking for invalid sections that
2808	 * must be zeroed.  Invalid sub-DEV_BSIZE'd areas (where the
2809	 * valid bit may be set) have already been zeroed by
2810	 * vm_page_set_validclean().
2811	 */
2812	for (b = i = 0; i <= PAGE_SIZE / DEV_BSIZE; ++i) {
2813		if (i == (PAGE_SIZE / DEV_BSIZE) ||
2814		    (m->valid & ((vm_page_bits_t)1 << i))) {
2815			if (i > b) {
2816				pmap_zero_page_area(m,
2817				    b << DEV_BSHIFT, (i - b) << DEV_BSHIFT);
2818			}
2819			b = i + 1;
2820		}
2821	}
2822
2823	/*
2824	 * setvalid is TRUE when we can safely set the zero'd areas
2825	 * as being valid.  We can do this if there are no cache consistency
2826	 * issues, e.g. it is OK to do with UFS, but not OK to do with NFS.
2827	 */
2828	if (setvalid)
2829		m->valid = VM_PAGE_BITS_ALL;
2830}
2831
2832/*
2833 *	vm_page_is_valid:
2834 *
2835 *	Is (partial) page valid?  Note that in the degenerate case where
2836 *	size == 0, this returns FALSE if the page is entirely invalid,
2837 *	and TRUE otherwise.
2838 */
2839int
2840vm_page_is_valid(vm_page_t m, int base, int size)
2841{
2842	vm_page_bits_t bits;
2843
2844	VM_OBJECT_LOCK_ASSERT(m->object, MA_OWNED);
2845	bits = vm_page_bits(base, size);
2846	if (m->valid && ((m->valid & bits) == bits))
2847		return (1);
2848	else
2849		return (0);
2850}
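
/*
 * Illustrative sketch (not part of this file): checking whether a sub-range
 * of a page is valid before copying it out.  The helper name is
 * hypothetical, and the page is assumed to belong to an object.
 */
#if 0
static int
example_range_is_valid(vm_page_t m, int offset, int length)
{
	int valid;

	VM_OBJECT_LOCK(m->object);
	valid = vm_page_is_valid(m, offset, length);
	VM_OBJECT_UNLOCK(m->object);
	return (valid);
}
#endif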
2851
2852/*
2853 * Set the page's dirty bits if the page is modified.
2854 */
2855void
2856vm_page_test_dirty(vm_page_t m)
2857{
2858
2859	VM_OBJECT_LOCK_ASSERT(m->object, MA_OWNED);
2860	if (m->dirty != VM_PAGE_BITS_ALL && pmap_is_modified(m))
2861		vm_page_dirty(m);
2862}
2863
2864void
2865vm_page_lock_KBI(vm_page_t m, const char *file, int line)
2866{
2867
2868	mtx_lock_flags_(vm_page_lockptr(m), 0, file, line);
2869}
2870
2871void
2872vm_page_unlock_KBI(vm_page_t m, const char *file, int line)
2873{
2874
2875	mtx_unlock_flags_(vm_page_lockptr(m), 0, file, line);
2876}
2877
2878int
2879vm_page_trylock_KBI(vm_page_t m, const char *file, int line)
2880{
2881
2882	return (mtx_trylock_flags_(vm_page_lockptr(m), 0, file, line));
2883}
2884
2885#if defined(INVARIANTS) || defined(INVARIANT_SUPPORT)
2886void
2887vm_page_lock_assert_KBI(vm_page_t m, int a, const char *file, int line)
2888{
2889
2890	mtx_assert_(vm_page_lockptr(m), a, file, line);
2891}
2892#endif
2893
2894int so_zerocp_fullpage = 0;
2895
2896/*
2897 *	Replace the given page with a copy.  The copied page assumes
2898 *	the portion of the given page's "wire_count" that is not the
2899 *	responsibility of this copy-on-write mechanism.
2900 *
2901 *	The object containing the given page must have a non-zero
2902 *	paging-in-progress count and be locked.
2903 */
2904void
2905vm_page_cowfault(vm_page_t m)
2906{
2907	vm_page_t mnew;
2908	vm_object_t object;
2909	vm_pindex_t pindex;
2910
2911	vm_page_lock_assert(m, MA_OWNED);
2912	object = m->object;
2913	VM_OBJECT_LOCK_ASSERT(object, MA_OWNED);
2914	KASSERT(object->paging_in_progress != 0,
2915	    ("vm_page_cowfault: object %p's paging-in-progress count is zero.",
2916	    object));
2917	pindex = m->pindex;
2918
2919 retry_alloc:
2920	pmap_remove_all(m);
2921	vm_page_remove(m);
2922	mnew = vm_page_alloc(object, pindex, VM_ALLOC_NORMAL | VM_ALLOC_NOBUSY);
2923	if (mnew == NULL) {
2924		vm_page_insert(m, object, pindex);
2925		vm_page_unlock(m);
2926		VM_OBJECT_UNLOCK(object);
2927		VM_WAIT;
2928		VM_OBJECT_LOCK(object);
2929		if (m == vm_page_lookup(object, pindex)) {
2930			vm_page_lock(m);
2931			goto retry_alloc;
2932		} else {
2933			/*
2934			 * Page disappeared during the wait.
2935			 */
2936			return;
2937		}
2938	}
2939
2940	if (m->cow == 0) {
2941		/*
2942		 * Check to see if we raced with an xmit complete while
2943		 * waiting to allocate a page.  If so, put things back
2944		 * the way they were.
2945		 */
2946		vm_page_unlock(m);
2947		vm_page_lock(mnew);
2948		vm_page_free(mnew);
2949		vm_page_unlock(mnew);
2950		vm_page_insert(m, object, pindex);
2951	} else { /* clear COW & copy page */
2952		if (!so_zerocp_fullpage)
2953			pmap_copy_page(m, mnew);
2954		mnew->valid = VM_PAGE_BITS_ALL;
2955		vm_page_dirty(mnew);
2956		mnew->wire_count = m->wire_count - m->cow;
2957		m->wire_count = m->cow;
2958		vm_page_unlock(m);
2959	}
2960}
2961
2962void
2963vm_page_cowclear(vm_page_t m)
2964{
2965
2966	vm_page_lock_assert(m, MA_OWNED);
2967	if (m->cow) {
2968		m->cow--;
2969		/*
2970		 * Let vm_fault() add back write permission lazily.
2971		 */
2972	}
2973	/*
2974	 * sf_buf_free() will free the page, so we needn't do it here.
2975	 */
2976}
2977
2978int
2979vm_page_cowsetup(vm_page_t m)
2980{
2981
2982	vm_page_lock_assert(m, MA_OWNED);
2983	if ((m->flags & PG_FICTITIOUS) != 0 ||
2984	    (m->oflags & VPO_UNMANAGED) != 0 ||
2985	    m->cow == USHRT_MAX - 1 || !VM_OBJECT_TRYLOCK(m->object))
2986		return (EBUSY);
2987	m->cow++;
2988	pmap_remove_write(m);
2989	VM_OBJECT_UNLOCK(m->object);
2990	return (0);
2991}
2992
2993#ifdef INVARIANTS
2994void
2995vm_page_object_lock_assert(vm_page_t m)
2996{
2997
2998	/*
2999	 * Certain of the page's fields may only be modified by the
3000	 * holder of the containing object's lock or the setter of the
3001	 * page's VPO_BUSY flag.  Unfortunately, the setter of the
3002	 * VPO_BUSY flag is not recorded, and thus cannot be checked
3003	 * here.
3004	 */
3005	if (m->object != NULL && (m->oflags & VPO_BUSY) == 0)
3006		VM_OBJECT_LOCK_ASSERT(m->object, MA_OWNED);
3007}
3008#endif
3009
3010#include "opt_ddb.h"
3011#ifdef DDB
3012#include <sys/kernel.h>
3013
3014#include <ddb/ddb.h>
3015
3016DB_SHOW_COMMAND(page, vm_page_print_page_info)
3017{
3018	db_printf("cnt.v_free_count: %d\n", cnt.v_free_count);
3019	db_printf("cnt.v_cache_count: %d\n", cnt.v_cache_count);
3020	db_printf("cnt.v_inactive_count: %d\n", cnt.v_inactive_count);
3021	db_printf("cnt.v_active_count: %d\n", cnt.v_active_count);
3022	db_printf("cnt.v_wire_count: %d\n", cnt.v_wire_count);
3023	db_printf("cnt.v_free_reserved: %d\n", cnt.v_free_reserved);
3024	db_printf("cnt.v_free_min: %d\n", cnt.v_free_min);
3025	db_printf("cnt.v_free_target: %d\n", cnt.v_free_target);
3026	db_printf("cnt.v_cache_min: %d\n", cnt.v_cache_min);
3027	db_printf("cnt.v_inactive_target: %d\n", cnt.v_inactive_target);
3028}
3029
3030DB_SHOW_COMMAND(pageq, vm_page_print_pageq_info)
3031{
3032
3033	db_printf("PQ_FREE:");
3034	db_printf(" %d", cnt.v_free_count);
3035	db_printf("\n");
3036
3037	db_printf("PQ_CACHE:");
3038	db_printf(" %d", cnt.v_cache_count);
3039	db_printf("\n");
3040
3041	db_printf("PQ_ACTIVE: %d, PQ_INACTIVE: %d\n",
3042		*vm_page_queues[PQ_ACTIVE].cnt,
3043		*vm_page_queues[PQ_INACTIVE].cnt);
3044}
3045#endif /* DDB */
3046