vm_page.c revision 172317
1/*-
2 * Copyright (c) 1991 Regents of the University of California.
3 * All rights reserved.
4 *
5 * This code is derived from software contributed to Berkeley by
6 * The Mach Operating System project at Carnegie-Mellon University.
7 *
8 * Redistribution and use in source and binary forms, with or without
9 * modification, are permitted provided that the following conditions
10 * are met:
11 * 1. Redistributions of source code must retain the above copyright
12 *    notice, this list of conditions and the following disclaimer.
13 * 2. Redistributions in binary form must reproduce the above copyright
14 *    notice, this list of conditions and the following disclaimer in the
15 *    documentation and/or other materials provided with the distribution.
16 * 4. Neither the name of the University nor the names of its contributors
17 *    may be used to endorse or promote products derived from this software
18 *    without specific prior written permission.
19 *
20 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
21 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
22 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
23 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
24 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
25 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
26 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
27 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
28 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
29 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
30 * SUCH DAMAGE.
31 *
32 *	from: @(#)vm_page.c	7.4 (Berkeley) 5/7/91
33 */
34
35/*-
36 * Copyright (c) 1987, 1990 Carnegie-Mellon University.
37 * All rights reserved.
38 *
39 * Authors: Avadis Tevanian, Jr., Michael Wayne Young
40 *
41 * Permission to use, copy, modify and distribute this software and
42 * its documentation is hereby granted, provided that both the copyright
43 * notice and this permission notice appear in all copies of the
44 * software, derivative works or modified versions, and any portions
45 * thereof, and that both notices appear in supporting documentation.
46 *
47 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
48 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND
49 * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
50 *
51 * Carnegie Mellon requests users of this software to return to
52 *
53 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
54 *  School of Computer Science
55 *  Carnegie Mellon University
56 *  Pittsburgh PA 15213-3890
57 *
58 * any improvements or extensions that they make and grant Carnegie the
59 * rights to redistribute these changes.
60 */
61
62/*
63 *			GENERAL RULES ON VM_PAGE MANIPULATION
64 *
65 *	- a pageq mutex is required when adding or removing a page from a
66 *	  page queue (vm_page_queue[]), regardless of other mutexes or the
67 *	  busy state of a page.
68 *
69 *	- a hash chain mutex is required when associating or disassociating
70 *	  a page from the VM PAGE CACHE hash table (vm_page_buckets),
71 *	  regardless of other mutexes or the busy state of a page.
72 *
73 *	- either a hash chain mutex OR a busied page is required in order
74 *	  to modify the page flags.  A hash chain mutex must be obtained in
75 *	  order to busy a page.  A page's flags cannot be modified by a
76 *	  hash chain mutex if the page is marked busy.
77 *
78 *	- The object memq mutex is held when inserting or removing
79 *	  pages from an object (vm_page_insert() or vm_page_remove()).  This
80 *	  is different from the object's main mutex.
81 *
82 *	Generally speaking, you have to be aware of side effects when running
83 *	vm_page ops.  A vm_page_lookup() will return with the hash chain
84 *	locked, whether it was able to lookup the page or not.  vm_page_free(),
85 *	vm_page_cache(), vm_page_activate(), and a number of other routines
86 *	will release the hash chain mutex for you.  Intermediate manipulation
87 *	routines such as vm_page_flag_set() expect the hash chain to be held
88 *	on entry and the hash chain will remain held on return.
89 *
90 *	pageq scanning can only occur with the pageq in question locked.
91 *	We have a known bottleneck with the active queue, but the cache
92 *	and free queues are actually arrays already.
93 */
94
95/*
96 *	Resident memory management module.
97 */
98
99#include <sys/cdefs.h>
100__FBSDID("$FreeBSD: head/sys/vm/vm_page.c 172317 2007-09-25 06:25:06Z alc $");
101
102#include <sys/param.h>
103#include <sys/systm.h>
104#include <sys/lock.h>
105#include <sys/kernel.h>
106#include <sys/malloc.h>
107#include <sys/mutex.h>
108#include <sys/proc.h>
109#include <sys/sysctl.h>
110#include <sys/vmmeter.h>
111#include <sys/vnode.h>
112
113#include <vm/vm.h>
114#include <vm/vm_param.h>
115#include <vm/vm_kern.h>
116#include <vm/vm_object.h>
117#include <vm/vm_page.h>
118#include <vm/vm_pageout.h>
119#include <vm/vm_pager.h>
120#include <vm/vm_phys.h>
121#include <vm/vm_extern.h>
122#include <vm/uma.h>
123#include <vm/uma_int.h>
124
125#include <machine/md_var.h>
126
127/*
128 *	Associated with each page of user-allocatable memory is a
129 *	page structure.
130 */
131
132struct mtx vm_page_queue_mtx;
133struct mtx vm_page_queue_free_mtx;
134
135vm_page_t vm_page_array = 0;
136int vm_page_array_size = 0;
137long first_page = 0;
138int vm_page_zero_count = 0;
139
140static int boot_pages = UMA_BOOT_PAGES;
141TUNABLE_INT("vm.boot_pages", &boot_pages);
142SYSCTL_INT(_vm, OID_AUTO, boot_pages, CTLFLAG_RD, &boot_pages, 0,
143	"number of pages allocated for bootstrapping the VM system");
144
145/*
146 *	vm_set_page_size:
147 *
148 *	Sets the page size, perhaps based upon the memory
149 *	size.  Must be called before any use of page-size
150 *	dependent functions.
151 */
152void
153vm_set_page_size(void)
154{
155	if (cnt.v_page_size == 0)
156		cnt.v_page_size = PAGE_SIZE;
157	if (((cnt.v_page_size - 1) & cnt.v_page_size) != 0)
158		panic("vm_set_page_size: page size not a power of two");
159}
160
161/*
162 *	vm_page_blacklist_lookup:
163 *
164 *	See if a physical address in this page has been listed
165 *	in the blacklist tunable.  Entries in the tunable are
166 *	separated by spaces or commas.  If an invalid integer is
167 *	encountered then the rest of the string is skipped.
168 */
169static int
170vm_page_blacklist_lookup(char *list, vm_paddr_t pa)
171{
172	vm_paddr_t bad;
173	char *cp, *pos;
174
175	for (pos = list; *pos != '\0'; pos = cp) {
176		bad = strtoq(pos, &cp, 0);
177		if (*cp != '\0') {
178			if (*cp == ' ' || *cp == ',') {
179				cp++;
180				if (cp == pos)
181					continue;
182			} else
183				break;
184		}
185		if (pa == trunc_page(bad))
186			return (1);
187	}
188	return (0);
189}
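
/*
 * An illustrative example (not part of the original file): the blacklist
 * is read from the "vm.blacklist" loader tunable by vm_page_startup()
 * below, so a hypothetical /boot/loader.conf entry might look like
 *
 *	vm.blacklist="0x87a000,0x9f3000"
 *
 * Each entry names a physical address (parsed by strtoq() with base 0,
 * so hexadecimal is accepted); pages containing a listed address are
 * skipped instead of being added to the free lists.
 */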
190
191/*
192 *	vm_page_startup:
193 *
194 *	Initializes the resident memory module.
195 *
196 *	Allocates memory for the page cells, and
197 *	for the object/offset-to-page hash table headers.
198 *	Each page cell is initialized and placed on the free list.
199 */
200vm_offset_t
201vm_page_startup(vm_offset_t vaddr)
202{
203	vm_offset_t mapped;
204	vm_size_t npages;
205	vm_paddr_t page_range;
206	vm_paddr_t new_end;
207	int i;
208	vm_paddr_t pa;
209	int nblocks;
210	vm_paddr_t last_pa;
211	char *list;
212
213	/* the biggest memory array is the second group of pages */
214	vm_paddr_t end;
215	vm_paddr_t biggestsize;
216	vm_paddr_t low_water, high_water;
217	int biggestone;
218
219	vm_paddr_t total;
220
221	total = 0;
222	biggestsize = 0;
223	biggestone = 0;
224	nblocks = 0;
225	vaddr = round_page(vaddr);
226
227	for (i = 0; phys_avail[i + 1]; i += 2) {
228		phys_avail[i] = round_page(phys_avail[i]);
229		phys_avail[i + 1] = trunc_page(phys_avail[i + 1]);
230	}
231
232	low_water = phys_avail[0];
233	high_water = phys_avail[1];
234
235	for (i = 0; phys_avail[i + 1]; i += 2) {
236		vm_paddr_t size = phys_avail[i + 1] - phys_avail[i];
237
238		if (size > biggestsize) {
239			biggestone = i;
240			biggestsize = size;
241		}
242		if (phys_avail[i] < low_water)
243			low_water = phys_avail[i];
244		if (phys_avail[i + 1] > high_water)
245			high_water = phys_avail[i + 1];
246		++nblocks;
247		total += size;
248	}
249
250	end = phys_avail[biggestone+1];
251
252	/*
253	 * Initialize the locks.
254	 */
255	mtx_init(&vm_page_queue_mtx, "vm page queue mutex", NULL, MTX_DEF |
256	    MTX_RECURSE);
257	mtx_init(&vm_page_queue_free_mtx, "vm page queue free mutex", NULL,
258	    MTX_DEF);
259
260	/*
261	 * Initialize the queue headers for the free queue, the active queue
262	 * and the inactive queue.
263	 */
264	vm_pageq_init();
265
266	/*
267	 * Allocate memory for use when boot strapping the kernel memory
268	 * allocator.
269	 */
270	new_end = end - (boot_pages * UMA_SLAB_SIZE);
271	new_end = trunc_page(new_end);
272	mapped = pmap_map(&vaddr, new_end, end,
273	    VM_PROT_READ | VM_PROT_WRITE);
274	bzero((void *)mapped, end - new_end);
275	uma_startup((void *)mapped, boot_pages);
276
277#if defined(__amd64__) || defined(__i386__)
278	/*
279	 * Allocate a bitmap to indicate that a random physical page
280	 * needs to be included in a minidump.
281	 *
282	 * The amd64 port needs this to indicate which direct map pages
283	 * need to be dumped, via calls to dump_add_page()/dump_drop_page().
284	 *
285	 * However, i386 still needs this workspace internally within the
286	 * minidump code.  In theory, they are not needed on i386, but are
287	 * included should the sf_buf code decide to use them.
288	 */
289	page_range = phys_avail[(nblocks - 1) * 2 + 1] / PAGE_SIZE;
290	vm_page_dump_size = round_page(roundup2(page_range, NBBY) / NBBY);
291	new_end -= vm_page_dump_size;
292	vm_page_dump = (void *)(uintptr_t)pmap_map(&vaddr, new_end,
293	    new_end + vm_page_dump_size, VM_PROT_READ | VM_PROT_WRITE);
294	bzero((void *)vm_page_dump, vm_page_dump_size);
295#endif
296	/*
297	 * Compute the number of pages of memory that will be available for
298	 * use (taking into account the overhead of a page structure per
299	 * page).
300	 */
301	first_page = low_water / PAGE_SIZE;
302#ifdef VM_PHYSSEG_SPARSE
303	page_range = 0;
304	for (i = 0; phys_avail[i + 1] != 0; i += 2)
305		page_range += atop(phys_avail[i + 1] - phys_avail[i]);
306#elif defined(VM_PHYSSEG_DENSE)
307	page_range = high_water / PAGE_SIZE - first_page;
308#else
309#error "Either VM_PHYSSEG_DENSE or VM_PHYSSEG_SPARSE must be defined."
310#endif
311	npages = (total - (page_range * sizeof(struct vm_page)) -
312	    (end - new_end)) / PAGE_SIZE;
313	end = new_end;
314
315	/*
316	 * Reserve an unmapped guard page to trap access to vm_page_array[-1].
317	 */
318	vaddr += PAGE_SIZE;
319
320	/*
321	 * Initialize the mem entry structures now, and put them in the free
322	 * queue.
323	 */
324	new_end = trunc_page(end - page_range * sizeof(struct vm_page));
325	mapped = pmap_map(&vaddr, new_end, end,
326	    VM_PROT_READ | VM_PROT_WRITE);
327	vm_page_array = (vm_page_t) mapped;
328#ifdef __amd64__
329	/*
330	 * pmap_map on amd64 comes out of the direct-map, not kvm like i386,
331	 * so the pages must be tracked for a crashdump to include this data.
332	 * This includes the vm_page_array and the early UMA bootstrap pages.
333	 */
334	for (pa = new_end; pa < phys_avail[biggestone + 1]; pa += PAGE_SIZE)
335		dump_add_page(pa);
336#endif
337	phys_avail[biggestone + 1] = new_end;
338
339	/*
340	 * Clear all of the page structures
341	 */
342	bzero((caddr_t) vm_page_array, page_range * sizeof(struct vm_page));
343	for (i = 0; i < page_range; i++)
344		vm_page_array[i].order = VM_NFREEORDER;
345	vm_page_array_size = page_range;
346
347	/*
348	 * This assertion tests the hypothesis that npages and total are
349	 * redundant.  XXX
350	 */
351	page_range = 0;
352	for (i = 0; phys_avail[i + 1] != 0; i += 2)
353		page_range += atop(phys_avail[i + 1] - phys_avail[i]);
354	KASSERT(page_range == npages,
355	    ("vm_page_startup: inconsistent page counts"));
356
357	/*
358	 * Initialize the physical memory allocator.
359	 */
360	vm_phys_init();
361
362	/*
363	 * Add every available physical page that is not blacklisted to
364	 * the free lists.
365	 */
366	cnt.v_page_count = 0;
367	cnt.v_free_count = 0;
368	list = getenv("vm.blacklist");
369	for (i = 0; phys_avail[i + 1] != 0; i += 2) {
370		pa = phys_avail[i];
371		last_pa = phys_avail[i + 1];
372		while (pa < last_pa) {
373			if (list != NULL &&
374			    vm_page_blacklist_lookup(list, pa))
375				printf("Skipping page with pa 0x%jx\n",
376				    (uintmax_t)pa);
377			else
378				vm_phys_add_page(pa);
379			pa += PAGE_SIZE;
380		}
381	}
382	freeenv(list);
383	return (vaddr);
384}
385
386void
387vm_page_flag_set(vm_page_t m, unsigned short bits)
388{
389
390	mtx_assert(&vm_page_queue_mtx, MA_OWNED);
391	m->flags |= bits;
392}
393
394void
395vm_page_flag_clear(vm_page_t m, unsigned short bits)
396{
397
398	mtx_assert(&vm_page_queue_mtx, MA_OWNED);
399	m->flags &= ~bits;
400}
401
402void
403vm_page_busy(vm_page_t m)
404{
405
406	VM_OBJECT_LOCK_ASSERT(m->object, MA_OWNED);
407	KASSERT((m->oflags & VPO_BUSY) == 0,
408	    ("vm_page_busy: page already busy!!!"));
409	m->oflags |= VPO_BUSY;
410}
411
412/*
413 *      vm_page_flash:
414 *
415 *      wakeup anyone waiting for the page.
416 */
417void
418vm_page_flash(vm_page_t m)
419{
420
421	VM_OBJECT_LOCK_ASSERT(m->object, MA_OWNED);
422	if (m->oflags & VPO_WANTED) {
423		m->oflags &= ~VPO_WANTED;
424		wakeup(m);
425	}
426}
427
428/*
429 *      vm_page_wakeup:
430 *
431 *      clear the VPO_BUSY flag and wakeup anyone waiting for the
432 *      page.
433 *
434 */
435void
436vm_page_wakeup(vm_page_t m)
437{
438
439	VM_OBJECT_LOCK_ASSERT(m->object, MA_OWNED);
440	KASSERT(m->oflags & VPO_BUSY, ("vm_page_wakeup: page not busy!!!"));
441	m->oflags &= ~VPO_BUSY;
442	vm_page_flash(m);
443}
444
445void
446vm_page_io_start(vm_page_t m)
447{
448
449	VM_OBJECT_LOCK_ASSERT(m->object, MA_OWNED);
450	m->busy++;
451}
452
453void
454vm_page_io_finish(vm_page_t m)
455{
456
457	VM_OBJECT_LOCK_ASSERT(m->object, MA_OWNED);
458	m->busy--;
459	if (m->busy == 0)
460		vm_page_flash(m);
461}
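
/*
 * An illustrative sketch (not part of the original file) of the busy-count
 * protocol around pager I/O; "object" and "m" here are hypothetical:
 *
 *	VM_OBJECT_LOCK(object);
 *	vm_page_io_start(m);		increments m->busy
 *	VM_OBJECT_UNLOCK(object);
 *	... issue the I/O on the page ...
 *	VM_OBJECT_LOCK(object);
 *	vm_page_io_finish(m);		wakes waiters once busy drops to 0
 *	VM_OBJECT_UNLOCK(object);
 *
 * While m->busy is non-zero, other threads can wait on the page via
 * vm_page_sleep_if_busy().
 */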
462
463/*
464 * Keep the page from being freed by the page daemon.  This has
465 * much the same effect as wiring, but with far lower overhead,
466 * and it should be used only for *very* temporary holding
467 * ("wiring").
468 */
469void
470vm_page_hold(vm_page_t mem)
471{
472
473	mtx_assert(&vm_page_queue_mtx, MA_OWNED);
474	mem->hold_count++;
475}
476
477void
478vm_page_unhold(vm_page_t mem)
479{
480
481	mtx_assert(&vm_page_queue_mtx, MA_OWNED);
482	--mem->hold_count;
483	KASSERT(mem->hold_count >= 0, ("vm_page_unhold: hold count < 0!!!"));
484	if (mem->hold_count == 0 && VM_PAGE_INQUEUE2(mem, PQ_HOLD))
485		vm_page_free_toq(mem);
486}
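
/*
 * An illustrative sketch (not part of the original file); "m" is
 * hypothetical.  A hold is taken and released with the page queues
 * lock held:
 *
 *	vm_page_lock_queues();
 *	vm_page_hold(m);
 *	vm_page_unlock_queues();
 *	... very short-term use of the page ...
 *	vm_page_lock_queues();
 *	vm_page_unhold(m);
 *	vm_page_unlock_queues();
 */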
487
488/*
489 *	vm_page_free:
490 *
491 *	Free a page.
492 */
493void
494vm_page_free(vm_page_t m)
495{
496
497	m->flags &= ~PG_ZERO;
498	vm_page_free_toq(m);
499}
500
501/*
502 *	vm_page_free_zero:
503 *
504 *	Free a page to the zeroed-pages queue
505 */
506void
507vm_page_free_zero(vm_page_t m)
508{
509
510	m->flags |= PG_ZERO;
511	vm_page_free_toq(m);
512}
513
514/*
515 *	vm_page_sleep:
516 *
517 *	Sleep and release the page queues lock.
518 *
519 *	The object containing the given page must be locked.
520 */
521void
522vm_page_sleep(vm_page_t m, const char *msg)
523{
524
525	VM_OBJECT_LOCK_ASSERT(m->object, MA_OWNED);
526	if (!mtx_owned(&vm_page_queue_mtx))
527		vm_page_lock_queues();
528	vm_page_flag_set(m, PG_REFERENCED);
529	vm_page_unlock_queues();
530
531	/*
532	 * It's possible that while we sleep, the page will get
533	 * unbusied and freed.  If we are holding the object
534	 * lock, we will assume we hold a reference to the object
535	 * such that even if m->object changes, we can re-lock
536	 * it.
537	 */
538	m->oflags |= VPO_WANTED;
539	msleep(m, VM_OBJECT_MTX(m->object), PVM, msg, 0);
540}
541
542/*
543 *	vm_page_dirty:
544 *
545 *	Make the entire page dirty.
546 */
547void
548vm_page_dirty(vm_page_t m)
549{
550	KASSERT((m->flags & PG_CACHED) == 0,
551	    ("vm_page_dirty: page in cache!"));
552	KASSERT(!VM_PAGE_IS_FREE(m),
553	    ("vm_page_dirty: page is free!"));
554	m->dirty = VM_PAGE_BITS_ALL;
555}
556
557/*
558 *	vm_page_splay:
559 *
560 *	Implements Sleator and Tarjan's top-down splay algorithm.  Returns
561 *	the vm_page containing the given pindex.  If, however, that
562 *	pindex is not found in the vm_object, returns a vm_page that is
563 *	adjacent to the pindex, coming before or after it.
564 */
565vm_page_t
566vm_page_splay(vm_pindex_t pindex, vm_page_t root)
567{
568	struct vm_page dummy;
569	vm_page_t lefttreemax, righttreemin, y;
570
571	if (root == NULL)
572		return (root);
573	lefttreemax = righttreemin = &dummy;
574	for (;; root = y) {
575		if (pindex < root->pindex) {
576			if ((y = root->left) == NULL)
577				break;
578			if (pindex < y->pindex) {
579				/* Rotate right. */
580				root->left = y->right;
581				y->right = root;
582				root = y;
583				if ((y = root->left) == NULL)
584					break;
585			}
586			/* Link into the new root's right tree. */
587			righttreemin->left = root;
588			righttreemin = root;
589		} else if (pindex > root->pindex) {
590			if ((y = root->right) == NULL)
591				break;
592			if (pindex > y->pindex) {
593				/* Rotate left. */
594				root->right = y->left;
595				y->left = root;
596				root = y;
597				if ((y = root->right) == NULL)
598					break;
599			}
600			/* Link into the new root's left tree. */
601			lefttreemax->right = root;
602			lefttreemax = root;
603		} else
604			break;
605	}
606	/* Assemble the new root. */
607	lefttreemax->right = root->left;
608	righttreemin->left = root->right;
609	root->left = dummy.right;
610	root->right = dummy.left;
611	return (root);
612}
613
614/*
615 *	vm_page_insert:		[ internal use only ]
616 *
617 *	Inserts the given mem entry into the object and object list.
618 *
619 *	The pagetables are not updated but will presumably fault the page
620 *	in if necessary, or if a kernel page the caller will at some point
621 *	enter the page into the kernel's pmap.  We are not allowed to block
622 *	here so we *can't* do this anyway.
623 *
624 *	The object and page must be locked.
625 *	This routine may not block.
626 */
627void
628vm_page_insert(vm_page_t m, vm_object_t object, vm_pindex_t pindex)
629{
630	vm_page_t root;
631
632	VM_OBJECT_LOCK_ASSERT(object, MA_OWNED);
633	if (m->object != NULL)
634		panic("vm_page_insert: page already inserted");
635
636	/*
637	 * Record the object/offset pair in this page
638	 */
639	m->object = object;
640	m->pindex = pindex;
641
642	/*
643	 * Now link into the object's ordered list of backed pages.
644	 */
645	root = object->root;
646	if (root == NULL) {
647		m->left = NULL;
648		m->right = NULL;
649		TAILQ_INSERT_TAIL(&object->memq, m, listq);
650	} else {
651		root = vm_page_splay(pindex, root);
652		if (pindex < root->pindex) {
653			m->left = root->left;
654			m->right = root;
655			root->left = NULL;
656			TAILQ_INSERT_BEFORE(root, m, listq);
657		} else if (pindex == root->pindex)
658			panic("vm_page_insert: offset already allocated");
659		else {
660			m->right = root->right;
661			m->left = root;
662			root->right = NULL;
663			TAILQ_INSERT_AFTER(&object->memq, root, m, listq);
664		}
665	}
666	object->root = m;
667	object->generation++;
668
669	/*
670	 * show that the object has one more resident page.
671	 */
672	object->resident_page_count++;
673	/*
674	 * Hold the vnode until the last page is released.
675	 */
676	if (object->resident_page_count == 1 && object->type == OBJT_VNODE)
677		vhold((struct vnode *)object->handle);
678
679	/*
680	 * Since we are inserting a new and possibly dirty page,
681	 * update the object's OBJ_MIGHTBEDIRTY flag.
682	 */
683	if (m->flags & PG_WRITEABLE)
684		vm_object_set_writeable_dirty(object);
685}
686
687/*
688 *	vm_page_remove:
689 *				NOTE: used by device pager as well -wfj
690 *
691 *	Removes the given mem entry from the object/offset-page
692 *	table and the object page list, but do not invalidate/terminate
693 *	the backing store.
694 *
695 *	The object and page must be locked.
696 *	The underlying pmap entry (if any) is NOT removed here.
697 *	This routine may not block.
698 */
699void
700vm_page_remove(vm_page_t m)
701{
702	vm_object_t object;
703	vm_page_t root;
704
705	if ((object = m->object) == NULL)
706		return;
707	VM_OBJECT_LOCK_ASSERT(object, MA_OWNED);
708	if (m->oflags & VPO_BUSY) {
709		m->oflags &= ~VPO_BUSY;
710		vm_page_flash(m);
711	}
712	mtx_assert(&vm_page_queue_mtx, MA_OWNED);
713
714	/*
715	 * Now remove from the object's list of backed pages.
716	 */
717	if (m != object->root)
718		vm_page_splay(m->pindex, object->root);
719	if (m->left == NULL)
720		root = m->right;
721	else {
722		root = vm_page_splay(m->pindex, m->left);
723		root->right = m->right;
724	}
725	object->root = root;
726	TAILQ_REMOVE(&object->memq, m, listq);
727
728	/*
729	 * And show that the object has one fewer resident page.
730	 */
731	object->resident_page_count--;
732	object->generation++;
733	/*
734	 * The vnode may now be recycled.
735	 */
736	if (object->resident_page_count == 0 && object->type == OBJT_VNODE)
737		vdrop((struct vnode *)object->handle);
738
739	m->object = NULL;
740}
741
742/*
743 *	vm_page_lookup:
744 *
745 *	Returns the page associated with the object/offset
746 *	pair specified; if none is found, NULL is returned.
747 *
748 *	The object must be locked.
749 *	This routine may not block.
750 *	This is a critical path routine
751 */
752vm_page_t
753vm_page_lookup(vm_object_t object, vm_pindex_t pindex)
754{
755	vm_page_t m;
756
757	VM_OBJECT_LOCK_ASSERT(object, MA_OWNED);
758	if ((m = object->root) != NULL && m->pindex != pindex) {
759		m = vm_page_splay(pindex, m);
760		if ((object->root = m)->pindex != pindex)
761			m = NULL;
762	}
763	return (m);
764}
765
766/*
767 *	vm_page_rename:
768 *
769 *	Move the given memory entry from its
770 *	current object to the specified target object/offset.
771 *
772 *	The object must be locked.
773 *	This routine may not block.
774 *
775 *	Note: swap associated with the page must be invalidated by the move.  We
776 *	      have to do this for several reasons:  (1) we aren't freeing the
777 *	      page, (2) we are dirtying the page, (3) the VM system is probably
778 *	      moving the page from object A to B, and will then later move
779 *	      the backing store from A to B and we can't have a conflict.
780 *
781 *	Note: we *always* dirty the page.  It is necessary both for the
782 *	      fact that we moved it, and because we may be invalidating
783 *	      swap.  If the page is on the cache, we have to deactivate it
784 *	      or vm_page_dirty() will panic.  Dirty pages are not allowed
785 *	      on the cache.
786 */
787void
788vm_page_rename(vm_page_t m, vm_object_t new_object, vm_pindex_t new_pindex)
789{
790
791	vm_page_remove(m);
792	vm_page_insert(m, new_object, new_pindex);
793	vm_page_dirty(m);
794}
795
796/*
797 *	Convert all of the cached pages belonging to the given object
798 *	into free pages.  If the given object has cached pages and is
799 *	backed by a vnode, reduce the vnode's hold count.
800 */
801void
802vm_page_cache_free(vm_object_t object)
803{
804	vm_page_t m, root;
805	boolean_t empty;
806
807	mtx_lock(&vm_page_queue_free_mtx);
808	empty = object->cache == NULL;
809	while ((m = object->cache) != NULL) {
810		if (m->left == NULL)
811			root = m->right;
812		else if (m->right == NULL)
813			root = m->left;
814		else {
815			root = vm_page_splay(m->pindex, m->left);
816			root->right = m->right;
817		}
818		m->object->cache = root;
819		m->object = NULL;
820		m->valid = 0;
821		/* Clear PG_CACHED and set PG_FREE. */
822		m->flags ^= PG_CACHED | PG_FREE;
823		KASSERT((m->flags & (PG_CACHED | PG_FREE)) == PG_FREE,
824		    ("vm_page_cache_free: page %p has inconsistent flags", m));
825		cnt.v_cache_count--;
826		cnt.v_free_count++;
827	}
828	mtx_unlock(&vm_page_queue_free_mtx);
829	if (object->type == OBJT_VNODE && !empty)
830		vdrop(object->handle);
831}
832
833/*
834 *	Returns the cached page that is associated with the given
835 *	object and offset.  If, however, none exists, returns NULL.
836 *
837 *	The free page queue must be locked.
838 */
839static inline vm_page_t
840vm_page_cache_lookup(vm_object_t object, vm_pindex_t pindex)
841{
842	vm_page_t m;
843
844	mtx_assert(&vm_page_queue_free_mtx, MA_OWNED);
845	if ((m = object->cache) != NULL && m->pindex != pindex) {
846		m = vm_page_splay(pindex, m);
847		if ((object->cache = m)->pindex != pindex)
848			m = NULL;
849	}
850	return (m);
851}
852
853/*
854 *	Remove the given cached page from its containing object's
855 *	collection of cached pages.
856 *
857 *	The free page queue must be locked.
858 */
859void
860vm_page_cache_remove(vm_page_t m)
861{
862	vm_object_t object;
863	vm_page_t root;
864
865	mtx_assert(&vm_page_queue_free_mtx, MA_OWNED);
866	KASSERT((m->flags & PG_CACHED) != 0,
867	    ("vm_page_cache_remove: page %p is not cached", m));
868	object = m->object;
869	if (m != object->cache) {
870		root = vm_page_splay(m->pindex, object->cache);
871		KASSERT(root == m,
872		    ("vm_page_cache_remove: page %p is not cached in object %p",
873		    m, object));
874	}
875	if (m->left == NULL)
876		root = m->right;
877	else if (m->right == NULL)
878		root = m->left;
879	else {
880		root = vm_page_splay(m->pindex, m->left);
881		root->right = m->right;
882	}
883	object->cache = root;
884	m->object = NULL;
885	cnt.v_cache_count--;
886}
887
888/*
889 *	Transfer all of the cached pages with offset greater than or
890 *	equal to 'offidxstart' from the original object's cache to the
891 *	new object's cache.  Initially, the new object's cache must be
892 *	empty.  Offset 'offidxstart' in the original object must
893 *	correspond to offset zero in the new object.
894 *
895 *	The new object must be locked.
896 */
897void
898vm_page_cache_transfer(vm_object_t orig_object, vm_pindex_t offidxstart,
899    vm_object_t new_object)
900{
901	vm_page_t m, m_next;
902
903	/*
904	 * Insertion into an object's collection of cached pages
905	 * requires the object to be locked.  In contrast, removal does
906	 * not.
907	 */
908	VM_OBJECT_LOCK_ASSERT(new_object, MA_OWNED);
909	KASSERT(new_object->cache == NULL,
910	    ("vm_page_cache_transfer: object %p has cached pages",
911	    new_object));
912	mtx_lock(&vm_page_queue_free_mtx);
913	if ((m = orig_object->cache) != NULL) {
914		/*
915		 * Transfer all of the pages with offset greater than or
916		 * equal to 'offidxstart' from the original object's
917		 * cache to the new object's cache.
918		 */
919		m = vm_page_splay(offidxstart, m);
920		if (m->pindex < offidxstart) {
921			orig_object->cache = m;
922			new_object->cache = m->right;
923			m->right = NULL;
924		} else {
925			orig_object->cache = m->left;
926			new_object->cache = m;
927			m->left = NULL;
928		}
929		KASSERT(new_object->cache == NULL ||
930		    new_object->type == OBJT_SWAP,
931		    ("vm_page_cache_transfer: object %p's type is incompatible"
932		    " with cached pages", new_object));
933
934		/*
935		 * Update the object and offset of each page that was
936		 * transferred to the new object's cache.
937		 */
938		while ((m = new_object->cache) != NULL) {
939			m_next = vm_page_splay(m->pindex, m->right);
940			m->object = new_object;
941			m->pindex -= offidxstart;
942			if (m_next == NULL)
943				break;
944			m->right = NULL;
945			m_next->left = m;
946			new_object->cache = m_next;
947		}
948	}
949	mtx_unlock(&vm_page_queue_free_mtx);
950}
951
952/*
953 *	vm_page_alloc:
954 *
955 *	Allocate and return a memory cell associated
956 *	with this VM object/offset pair.
957 *
958 *	page_req classes:
959 *	VM_ALLOC_NORMAL		normal process request
960 *	VM_ALLOC_SYSTEM		system *really* needs a page
961 *	VM_ALLOC_INTERRUPT	interrupt time request
962 *	VM_ALLOC_ZERO		zero page
963 *
964 *	This routine may not block.
965 */
966vm_page_t
967vm_page_alloc(vm_object_t object, vm_pindex_t pindex, int req)
968{
969	struct vnode *vp = NULL;
970	vm_object_t m_object;
971	vm_page_t m;
972	int flags, page_req;
973
974	page_req = req & VM_ALLOC_CLASS_MASK;
975	KASSERT(curthread->td_intr_nesting_level == 0 ||
976	    page_req == VM_ALLOC_INTERRUPT,
977	    ("vm_page_alloc(NORMAL|SYSTEM) in interrupt context"));
978
979	if ((req & VM_ALLOC_NOOBJ) == 0) {
980		KASSERT(object != NULL,
981		    ("vm_page_alloc: NULL object."));
982		VM_OBJECT_LOCK_ASSERT(object, MA_OWNED);
983	}
984
985	/*
986	 * The pager is allowed to eat deeper into the free page list.
987	 */
988	if ((curproc == pageproc) && (page_req != VM_ALLOC_INTERRUPT)) {
989		page_req = VM_ALLOC_SYSTEM;
990	}
991
992	mtx_lock(&vm_page_queue_free_mtx);
993	if (cnt.v_free_count + cnt.v_cache_count > cnt.v_free_reserved ||
994	    (page_req == VM_ALLOC_SYSTEM &&
995	    cnt.v_free_count + cnt.v_cache_count > cnt.v_interrupt_free_min) ||
996	    (page_req == VM_ALLOC_INTERRUPT &&
997	    cnt.v_free_count + cnt.v_cache_count > 0)) {
998		/*
999		 * Allocate from the free queue if the number of free pages
1000		 * exceeds the minimum for the request class.
1001		 */
1002		if (object != NULL &&
1003		    (m = vm_page_cache_lookup(object, pindex)) != NULL) {
1004			if ((req & VM_ALLOC_IFNOTCACHED) != 0) {
1005				mtx_unlock(&vm_page_queue_free_mtx);
1006				return (NULL);
1007			}
1008			vm_phys_unfree_page(m);
1009		} else if ((req & VM_ALLOC_IFCACHED) != 0) {
1010			mtx_unlock(&vm_page_queue_free_mtx);
1011			return (NULL);
1012		} else
1013			m = vm_phys_alloc_pages(object != NULL ?
1014			    VM_FREEPOOL_DEFAULT : VM_FREEPOOL_DIRECT, 0);
1015	} else {
1016		/*
1017		 * Not allocatable, give up.
1018		 */
1019		mtx_unlock(&vm_page_queue_free_mtx);
1020		atomic_add_int(&vm_pageout_deficit, 1);
1021		pagedaemon_wakeup();
1022		return (NULL);
1023	}
1024
1025	/*
1026	 *  At this point we had better have found a good page.
1027	 */
1028
1029	KASSERT(
1030	    m != NULL,
1031	    ("vm_page_alloc(): missing page on free queue")
1032	);
1033	if ((m->flags & PG_CACHED) != 0) {
1034		KASSERT(m->valid != 0,
1035		    ("vm_page_alloc: cached page %p is invalid", m));
1036		if (m->object == object && m->pindex == pindex)
1037			cnt.v_reactivated++;
1038		else
1039			m->valid = 0;
1040		m_object = m->object;
1041		vm_page_cache_remove(m);
1042		if (m_object->type == OBJT_VNODE && m_object->cache == NULL)
1043			vp = m_object->handle;
1044	} else {
1045		KASSERT(VM_PAGE_IS_FREE(m),
1046		    ("vm_page_alloc: page %p is not free", m));
1047		KASSERT(m->valid == 0,
1048		    ("vm_page_alloc: free page %p is valid", m));
1049		cnt.v_free_count--;
1050	}
1051
1052	/*
1053	 * Initialize structure.  Only the PG_ZERO flag is inherited.
1054	 */
1055	flags = 0;
1056	if (m->flags & PG_ZERO) {
1057		vm_page_zero_count--;
1058		if (req & VM_ALLOC_ZERO)
1059			flags = PG_ZERO;
1060	}
1061	if (object != NULL && object->type == OBJT_PHYS)
1062		flags |= PG_UNMANAGED;
1063	m->flags = flags;
1064	if (req & (VM_ALLOC_NOBUSY | VM_ALLOC_NOOBJ))
1065		m->oflags = 0;
1066	else
1067		m->oflags = VPO_BUSY;
1068	if (req & VM_ALLOC_WIRED) {
1069		atomic_add_int(&cnt.v_wire_count, 1);
1070		m->wire_count = 1;
1071	} else
1072		m->wire_count = 0;
1073	m->hold_count = 0;
1074	m->act_count = 0;
1075	m->busy = 0;
1076	KASSERT(m->dirty == 0, ("vm_page_alloc: free/cache page %p was dirty", m));
1077	mtx_unlock(&vm_page_queue_free_mtx);
1078
1079	if ((req & VM_ALLOC_NOOBJ) == 0)
1080		vm_page_insert(m, object, pindex);
1081	else
1082		m->pindex = pindex;
1083
1084	/*
1085	 * The following call to vdrop() must come after the above call
1086	 * to vm_page_insert() in case both affect the same object and
1087	 * vnode.  Otherwise, the affected vnode's hold count could
1088	 * temporarily become zero.
1089	 */
1090	if (vp != NULL)
1091		vdrop(vp);
1092
1093	/*
1094	 * Don't wakeup too often - wakeup the pageout daemon when
1095	 * we would be nearly out of memory.
1096	 */
1097	if (vm_paging_needed())
1098		pagedaemon_wakeup();
1099
1100	return (m);
1101}
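
/*
 * An illustrative sketch (not part of the original file) of a common
 * calling pattern; "object" and "pindex" are hypothetical.  The returned
 * page is busied (VPO_BUSY) unless VM_ALLOC_NOBUSY or VM_ALLOC_NOOBJ was
 * specified:
 *
 *	VM_OBJECT_LOCK(object);
 *	m = vm_page_alloc(object, pindex, VM_ALLOC_NORMAL);
 *	if (m == NULL) {
 *		VM_OBJECT_UNLOCK(object);
 *		VM_WAIT;
 *		VM_OBJECT_LOCK(object);
 *		... retry, looking the page up first in case another
 *		    thread instantiated it while we slept ...
 *	}
 *
 * vm_page_grab() below wraps essentially this lookup/allocate/wait loop.
 */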
1102
1103/*
1104 *	vm_wait:	(also see VM_WAIT macro)
1105 *
1106 *	Block until free pages are available for allocation
1107 *	- Called in various places before memory allocations.
1108 */
1109void
1110vm_wait(void)
1111{
1112
1113	mtx_lock(&vm_page_queue_free_mtx);
1114	if (curproc == pageproc) {
1115		vm_pageout_pages_needed = 1;
1116		msleep(&vm_pageout_pages_needed, &vm_page_queue_free_mtx,
1117		    PDROP | PSWP, "VMWait", 0);
1118	} else {
1119		if (!vm_pages_needed) {
1120			vm_pages_needed = 1;
1121			wakeup(&vm_pages_needed);
1122		}
1123		msleep(&cnt.v_free_count, &vm_page_queue_free_mtx, PDROP | PVM,
1124		    "vmwait", 0);
1125	}
1126}
1127
1128/*
1129 *	vm_waitpfault:	(also see VM_WAITPFAULT macro)
1130 *
1131 *	Block until free pages are available for allocation
1132 *	- Called only in vm_fault so that processes page faulting
1133 *	  can be easily tracked.
1134 *	- Sleeps at a lower priority than vm_wait() so that vm_wait()ing
1135 *	  processes will be able to grab memory first.  Do not change
1136 *	  this balance without careful testing first.
1137 */
1138void
1139vm_waitpfault(void)
1140{
1141
1142	mtx_lock(&vm_page_queue_free_mtx);
1143	if (!vm_pages_needed) {
1144		vm_pages_needed = 1;
1145		wakeup(&vm_pages_needed);
1146	}
1147	msleep(&cnt.v_free_count, &vm_page_queue_free_mtx, PDROP | PUSER,
1148	    "pfault", 0);
1149}
1150
1151/*
1152 *	vm_page_activate:
1153 *
1154 *	Put the specified page on the active list (if appropriate).
1155 *	Ensure that act_count is at least ACT_INIT but do not otherwise
1156 *	mess with it.
1157 *
1158 *	The page queues must be locked.
1159 *	This routine may not block.
1160 */
1161void
1162vm_page_activate(vm_page_t m)
1163{
1164
1165	mtx_assert(&vm_page_queue_mtx, MA_OWNED);
1166	if (VM_PAGE_GETKNOWNQUEUE2(m) != PQ_ACTIVE) {
1167		vm_pageq_remove(m);
1168		if (m->wire_count == 0 && (m->flags & PG_UNMANAGED) == 0) {
1169			if (m->act_count < ACT_INIT)
1170				m->act_count = ACT_INIT;
1171			vm_pageq_enqueue(PQ_ACTIVE, m);
1172		}
1173	} else {
1174		if (m->act_count < ACT_INIT)
1175			m->act_count = ACT_INIT;
1176	}
1177}
1178
1179/*
1180 *	vm_page_free_wakeup:
1181 *
1182 *	Helper routine for vm_page_free_toq() and vm_page_cache().  This
1183 *	routine is called when a page has been added to the cache or free
1184 *	queues.
1185 *
1186 *	The page queues must be locked.
1187 *	This routine may not block.
1188 */
1189static inline void
1190vm_page_free_wakeup(void)
1191{
1192
1193	mtx_assert(&vm_page_queue_free_mtx, MA_OWNED);
1194	/*
1195	 * If the pageout daemon needs pages, then tell it that there
1196	 * are some free.
1197	 */
1198	if (vm_pageout_pages_needed &&
1199	    cnt.v_cache_count + cnt.v_free_count >= cnt.v_pageout_free_min) {
1200		wakeup(&vm_pageout_pages_needed);
1201		vm_pageout_pages_needed = 0;
1202	}
1203	/*
1204	 * Wake up processes that are waiting on memory if we hit a
1205	 * high water mark, and wake up the scheduler process if we
1206	 * have lots of memory; that process will swap processes in.
1207	 */
1208	if (vm_pages_needed && !vm_page_count_min()) {
1209		vm_pages_needed = 0;
1210		wakeup(&cnt.v_free_count);
1211	}
1212}
1213
1214/*
1215 *	vm_page_free_toq:
1216 *
1217 *	Returns the given page to the free list,
1218 *	disassociating it from any VM object.
1219 *
1220 *	Object and page must be locked prior to entry.
1221 *	This routine may not block.
1222 */
1223
1224void
1225vm_page_free_toq(vm_page_t m)
1226{
1227
1228	if (VM_PAGE_GETQUEUE(m) != PQ_NONE)
1229		mtx_assert(&vm_page_queue_mtx, MA_OWNED);
1230	KASSERT(!pmap_page_is_mapped(m),
1231	    ("vm_page_free_toq: freeing mapped page %p", m));
1232	PCPU_INC(cnt.v_tfree);
1233
1234	if (m->busy || VM_PAGE_IS_FREE(m)) {
1235		printf(
1236		"vm_page_free: pindex(%lu), busy(%d), VPO_BUSY(%d), hold(%d)\n",
1237		    (u_long)m->pindex, m->busy, (m->oflags & VPO_BUSY) ? 1 : 0,
1238		    m->hold_count);
1239		if (VM_PAGE_IS_FREE(m))
1240			panic("vm_page_free: freeing free page");
1241		else
1242			panic("vm_page_free: freeing busy page");
1243	}
1244
1245	/*
1246	 * unqueue, then remove page.  Note that we cannot destroy
1247	 * the page here because we do not want to call the pager's
1248	 * callback routine until after we've put the page on the
1249	 * appropriate free queue.
1250	 */
1251	vm_pageq_remove(m);
1252	vm_page_remove(m);
1253
1254	/*
1255	 * If fictitious remove object association and
1256	 * return, otherwise delay object association removal.
1257	 */
1258	if ((m->flags & PG_FICTITIOUS) != 0) {
1259		return;
1260	}
1261
1262	m->valid = 0;
1263	vm_page_undirty(m);
1264
1265	if (m->wire_count != 0) {
1266		if (m->wire_count > 1) {
1267			panic("vm_page_free: invalid wire count (%d), pindex: 0x%lx",
1268				m->wire_count, (long)m->pindex);
1269		}
1270		panic("vm_page_free: freeing wired page");
1271	}
1272	if (m->hold_count != 0) {
1273		m->flags &= ~PG_ZERO;
1274		vm_pageq_enqueue(PQ_HOLD, m);
1275	} else {
1276		m->flags |= PG_FREE;
1277		mtx_lock(&vm_page_queue_free_mtx);
1278		cnt.v_free_count++;
1279		if ((m->flags & PG_ZERO) != 0) {
1280			vm_phys_free_pages(m, 0);
1281			++vm_page_zero_count;
1282		} else {
1283			vm_phys_free_pages(m, 0);
1284			vm_page_zero_idle_wakeup();
1285		}
1286		vm_page_free_wakeup();
1287		mtx_unlock(&vm_page_queue_free_mtx);
1288	}
1289}
1290
1291/*
1292 *	vm_page_wire:
1293 *
1294 *	Mark this page as wired down by yet
1295 *	another map, removing it from paging queues
1296 *	as necessary.
1297 *
1298 *	The page queues must be locked.
1299 *	This routine may not block.
1300 */
1301void
1302vm_page_wire(vm_page_t m)
1303{
1304
1305	/*
1306	 * Only bump the wire statistics if the page is not already wired,
1307	 * and only unqueue the page if it is on some queue (if it is unmanaged
1308	 * it is already off the queues).
1309	 */
1310	mtx_assert(&vm_page_queue_mtx, MA_OWNED);
1311	if (m->flags & PG_FICTITIOUS)
1312		return;
1313	if (m->wire_count == 0) {
1314		if ((m->flags & PG_UNMANAGED) == 0)
1315			vm_pageq_remove(m);
1316		atomic_add_int(&cnt.v_wire_count, 1);
1317	}
1318	m->wire_count++;
1319	KASSERT(m->wire_count != 0, ("vm_page_wire: wire_count overflow m=%p", m));
1320}
1321
1322/*
1323 *	vm_page_unwire:
1324 *
1325 *	Release one wiring of this page, potentially
1326 *	enabling it to be paged again.
1327 *
1328 *	Many pages placed on the inactive queue should actually go
1329 *	into the cache, but it is difficult to figure out which.  What
1330 *	we do instead, if the inactive target is well met, is to put
1331 *	clean pages at the head of the inactive queue instead of the tail.
1332 *	This will cause them to be moved to the cache more quickly and
1333 *	if not actively re-referenced, freed more quickly.  If we just
1334 *	stick these pages at the end of the inactive queue, heavy filesystem
1335 *	meta-data accesses can cause an unnecessary paging load on memory bound
1336 *	processes.  This optimization causes one-time-use metadata to be
1337 *	reused more quickly.
1338 *
1339 *	BUT, if we are in a low-memory situation we have no choice but to
1340 *	put clean pages on the cache queue.
1341 *
1342 *	A number of routines use vm_page_unwire() to guarantee that the page
1343 *	will go into either the inactive or active queues, and will NEVER
1344 *	be placed in the cache - for example, just after dirtying a page.
1345 *	dirty pages in the cache are not allowed.
1346 *
1347 *	The page queues must be locked.
1348 *	This routine may not block.
1349 */
1350void
1351vm_page_unwire(vm_page_t m, int activate)
1352{
1353
1354	mtx_assert(&vm_page_queue_mtx, MA_OWNED);
1355	if (m->flags & PG_FICTITIOUS)
1356		return;
1357	if (m->wire_count > 0) {
1358		m->wire_count--;
1359		if (m->wire_count == 0) {
1360			atomic_subtract_int(&cnt.v_wire_count, 1);
1361			if (m->flags & PG_UNMANAGED) {
1362				;
1363			} else if (activate)
1364				vm_pageq_enqueue(PQ_ACTIVE, m);
1365			else {
1366				vm_page_flag_clear(m, PG_WINATCFLS);
1367				vm_pageq_enqueue(PQ_INACTIVE, m);
1368			}
1369		}
1370	} else {
1371		panic("vm_page_unwire: invalid wire count: %d", m->wire_count);
1372	}
1373}
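
/*
 * An illustrative sketch (not part of the original file); "m" is
 * hypothetical.  Wiring and unwiring are done with the page queues
 * lock held:
 *
 *	vm_page_lock_queues();
 *	vm_page_wire(m);
 *	vm_page_unlock_queues();
 *	... the page cannot be paged out while it remains wired ...
 *	vm_page_lock_queues();
 *	vm_page_unwire(m, 0);		0: requeue on the inactive queue
 *	vm_page_unlock_queues();
 */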
1374
1375
1376/*
1377 * Move the specified page to the inactive queue.  If the page has
1378 * any associated swap, the swap is deallocated.
1379 *
1380 * Normally athead is 0 resulting in LRU operation.  athead is set
1381 * to 1 if we want this page to be 'as if it were placed in the cache',
1382 * except without unmapping it from the process address space.
1383 *
1384 * This routine may not block.
1385 */
1386static inline void
1387_vm_page_deactivate(vm_page_t m, int athead)
1388{
1389
1390	mtx_assert(&vm_page_queue_mtx, MA_OWNED);
1391
1392	/*
1393	 * Ignore if already inactive.
1394	 */
1395	if (VM_PAGE_INQUEUE2(m, PQ_INACTIVE))
1396		return;
1397	if (m->wire_count == 0 && (m->flags & PG_UNMANAGED) == 0) {
1398		vm_page_flag_clear(m, PG_WINATCFLS);
1399		vm_pageq_remove(m);
1400		if (athead)
1401			TAILQ_INSERT_HEAD(&vm_page_queues[PQ_INACTIVE].pl, m, pageq);
1402		else
1403			TAILQ_INSERT_TAIL(&vm_page_queues[PQ_INACTIVE].pl, m, pageq);
1404		VM_PAGE_SETQUEUE2(m, PQ_INACTIVE);
1405		cnt.v_inactive_count++;
1406	}
1407}
1408
1409void
1410vm_page_deactivate(vm_page_t m)
1411{
1412    _vm_page_deactivate(m, 0);
1413}
1414
1415/*
1416 * vm_page_try_to_cache:
1417 *
1418 * Returns 0 on failure, 1 on success
1419 */
1420int
1421vm_page_try_to_cache(vm_page_t m)
1422{
1423
1424	mtx_assert(&vm_page_queue_mtx, MA_OWNED);
1425	VM_OBJECT_LOCK_ASSERT(m->object, MA_OWNED);
1426	if (m->dirty || m->hold_count || m->busy || m->wire_count ||
1427	    (m->oflags & VPO_BUSY) || (m->flags & PG_UNMANAGED)) {
1428		return (0);
1429	}
1430	pmap_remove_all(m);
1431	if (m->dirty)
1432		return (0);
1433	vm_page_cache(m);
1434	return (1);
1435}
1436
1437/*
1438 * vm_page_try_to_free()
1439 *
1440 *	Attempt to free the page.  If we cannot free it, we do nothing.
1441 *	1 is returned on success, 0 on failure.
1442 */
1443int
1444vm_page_try_to_free(vm_page_t m)
1445{
1446
1447	mtx_assert(&vm_page_queue_mtx, MA_OWNED);
1448	if (m->object != NULL)
1449		VM_OBJECT_LOCK_ASSERT(m->object, MA_OWNED);
1450	if (m->dirty || m->hold_count || m->busy || m->wire_count ||
1451	    (m->oflags & VPO_BUSY) || (m->flags & PG_UNMANAGED)) {
1452		return (0);
1453	}
1454	pmap_remove_all(m);
1455	if (m->dirty)
1456		return (0);
1457	vm_page_free(m);
1458	return (1);
1459}
1460
1461/*
1462 * vm_page_cache
1463 *
1464 * Put the specified page onto the page cache queue (if appropriate).
1465 *
1466 * This routine may not block.
1467 */
1468void
1469vm_page_cache(vm_page_t m)
1470{
1471	vm_object_t object;
1472	vm_page_t root;
1473
1474	mtx_assert(&vm_page_queue_mtx, MA_OWNED);
1475	object = m->object;
1476	VM_OBJECT_LOCK_ASSERT(object, MA_OWNED);
1477	if ((m->flags & PG_UNMANAGED) || (m->oflags & VPO_BUSY) || m->busy ||
1478	    m->hold_count || m->wire_count) {
1479		panic("vm_page_cache: attempting to cache busy page");
1480	}
1481	if (m->valid == 0 || object->type == OBJT_DEFAULT) {
1482		/*
1483		 * Hypothesis: A cache-eligible page belonging to a
1484		 * default object must be zero filled.
1485		 */
1486		vm_page_free(m);
1487		return;
1488	}
1489	KASSERT((m->flags & PG_CACHED) == 0,
1490	    ("vm_page_cache: page %p is already cached", m));
1491	cnt.v_tcached++;
1492
1493	/*
1494	 * Remove all pmaps and indicate that the page is not
1495	 * writeable or mapped.
1496	 */
1497	pmap_remove_all(m);
1498	if (m->dirty != 0) {
1499		panic("vm_page_cache: caching a dirty page, pindex: %ld",
1500			(long)m->pindex);
1501	}
1502
1503	/*
1504	 * Remove the page from the paging queues.
1505	 */
1506	vm_pageq_remove(m);
1507
1508	/*
1509	 * Remove the page from the object's collection of resident
1510	 * pages.
1511	 */
1512	if (m != object->root)
1513		vm_page_splay(m->pindex, object->root);
1514	if (m->left == NULL)
1515		root = m->right;
1516	else {
1517		root = vm_page_splay(m->pindex, m->left);
1518		root->right = m->right;
1519	}
1520	object->root = root;
1521	TAILQ_REMOVE(&object->memq, m, listq);
1522	object->resident_page_count--;
1523	object->generation++;
1524
1525	/*
1526	 * Insert the page into the object's collection of cached pages
1527	 * and the physical memory allocator's cache/free page queues.
1528	 */
1529	vm_page_flag_set(m, PG_CACHED);
1530	vm_page_flag_clear(m, PG_ZERO);
1531	mtx_lock(&vm_page_queue_free_mtx);
1532	vm_phys_set_pool(VM_FREEPOOL_CACHE, m, 0);
1533	cnt.v_cache_count++;
1534	root = object->cache;
1535	if (root == NULL) {
1536		m->left = NULL;
1537		m->right = NULL;
1538	} else {
1539		root = vm_page_splay(m->pindex, root);
1540		if (m->pindex < root->pindex) {
1541			m->left = root->left;
1542			m->right = root;
1543			root->left = NULL;
1544		} else if (__predict_false(m->pindex == root->pindex))
1545			panic("vm_page_cache: offset already cached");
1546		else {
1547			m->right = root->right;
1548			m->left = root;
1549			root->right = NULL;
1550		}
1551	}
1552	object->cache = m;
1553	vm_phys_free_pages(m, 0);
1554	vm_page_free_wakeup();
1555	mtx_unlock(&vm_page_queue_free_mtx);
1556
1557	/*
1558	 * Increment the vnode's hold count if this is the object's only
1559	 * cached page.  Decrement the vnode's hold count if this was
1560	 * the object's only resident page.
1561	 */
1562	if (object->type == OBJT_VNODE) {
1563		if (root == NULL && object->resident_page_count != 0)
1564			vhold(object->handle);
1565		else if (root != NULL && object->resident_page_count == 0)
1566			vdrop(object->handle);
1567	}
1568}
1569
1570/*
1571 * vm_page_dontneed
1572 *
1573 *	Cache, deactivate, or do nothing as appropriate.  This routine
1574 *	is typically used by madvise() MADV_DONTNEED.
1575 *
1576 *	Generally speaking we want to move the page into the cache so
1577 *	it gets reused quickly.  However, this can result in a silly syndrome
1578 *	due to the page recycling too quickly.  Small objects will not be
1579 *	fully cached.  On the other hand, if we move the page to the inactive
1580 *	queue we wind up with a problem whereby very large objects
1581 *	unnecessarily blow away our inactive and cache queues.
1582 *
1583 *	The solution is to move the pages based on a fixed weighting.  We
1584 *	either leave them alone, deactivate them, or move them to the cache,
1585 *	where moving them to the cache has the highest weighting.
1586 *	By forcing some pages into other queues we eventually force the
1587 *	system to balance the queues, potentially recovering other unrelated
1588 *	space from active.  The idea is to not force this to happen too
1589 *	often.
1590 */
1591void
1592vm_page_dontneed(vm_page_t m)
1593{
1594	static int dnweight;
1595	int dnw;
1596	int head;
1597
1598	mtx_assert(&vm_page_queue_mtx, MA_OWNED);
1599	dnw = ++dnweight;
1600
1601	/*
1602	 * occasionally leave the page alone
1603	 */
1604	if ((dnw & 0x01F0) == 0 ||
1605	    VM_PAGE_INQUEUE2(m, PQ_INACTIVE)) {
1606		if (m->act_count >= ACT_INIT)
1607			--m->act_count;
1608		return;
1609	}
1610
1611	if (m->dirty == 0 && pmap_is_modified(m))
1612		vm_page_dirty(m);
1613
1614	if (m->dirty || (dnw & 0x0070) == 0) {
1615		/*
1616		 * Deactivate the page 3 times out of 32.
1617		 */
1618		head = 0;
1619	} else {
1620		/*
1621		 * Cache the page 28 times out of every 32.  Note that
1622		 * the page is deactivated instead of cached, but placed
1623		 * at the head of the queue instead of the tail.
1624		 */
1625		head = 1;
1626	}
1627	_vm_page_deactivate(m, head);
1628}
1629
1630/*
1631 * Grab a page, waiting until we are woken up due to the page
1632 * changing state.  We keep on waiting as long as the page remains
1633 * in the object.  If the page doesn't exist, first allocate it
1634 * and then conditionally zero it.
1635 *
1636 * This routine may block.
1637 */
1638vm_page_t
1639vm_page_grab(vm_object_t object, vm_pindex_t pindex, int allocflags)
1640{
1641	vm_page_t m;
1642
1643	VM_OBJECT_LOCK_ASSERT(object, MA_OWNED);
1644retrylookup:
1645	if ((m = vm_page_lookup(object, pindex)) != NULL) {
1646		if (vm_page_sleep_if_busy(m, TRUE, "pgrbwt")) {
1647			if ((allocflags & VM_ALLOC_RETRY) == 0)
1648				return (NULL);
1649			goto retrylookup;
1650		} else {
1651			if ((allocflags & VM_ALLOC_WIRED) != 0) {
1652				vm_page_lock_queues();
1653				vm_page_wire(m);
1654				vm_page_unlock_queues();
1655			}
1656			if ((allocflags & VM_ALLOC_NOBUSY) == 0)
1657				vm_page_busy(m);
1658			return (m);
1659		}
1660	}
1661	m = vm_page_alloc(object, pindex, allocflags & ~VM_ALLOC_RETRY);
1662	if (m == NULL) {
1663		VM_OBJECT_UNLOCK(object);
1664		VM_WAIT;
1665		VM_OBJECT_LOCK(object);
1666		if ((allocflags & VM_ALLOC_RETRY) == 0)
1667			return (NULL);
1668		goto retrylookup;
1669	} else if (m->valid != 0)
1670		return (m);
1671	if (allocflags & VM_ALLOC_ZERO && (m->flags & PG_ZERO) == 0)
1672		pmap_zero_page(m);
1673	return (m);
1674}
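
/*
 * An illustrative sketch (not part of the original file); "object" and
 * "pindex" are hypothetical.  With VM_ALLOC_RETRY the call does not
 * return NULL, and the page comes back busied:
 *
 *	VM_OBJECT_LOCK(object);
 *	m = vm_page_grab(object, pindex, VM_ALLOC_NORMAL | VM_ALLOC_RETRY);
 *	... fill in or examine the page ...
 *	vm_page_wakeup(m);		clears VPO_BUSY, wakes waiters
 *	VM_OBJECT_UNLOCK(object);
 */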
1675
1676/*
1677 * Mapping function for valid bits or for dirty bits in
1678 * a page.  May not block.
1679 *
1680 * Inputs are required to range within a page.
1681 */
1682int
1683vm_page_bits(int base, int size)
1684{
1685	int first_bit;
1686	int last_bit;
1687
1688	KASSERT(
1689	    base + size <= PAGE_SIZE,
1690	    ("vm_page_bits: illegal base/size %d/%d", base, size)
1691	);
1692
1693	if (size == 0)		/* handle degenerate case */
1694		return (0);
1695
1696	first_bit = base >> DEV_BSHIFT;
1697	last_bit = (base + size - 1) >> DEV_BSHIFT;
1698
1699	return ((2 << last_bit) - (1 << first_bit));
1700}
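
/*
 * A worked example (not part of the original file), assuming DEV_BSIZE is
 * 512 (DEV_BSHIFT 9) and a 4K page, so that a page holds eight chunks:
 *
 *	vm_page_bits(0, PAGE_SIZE) == 0xff	all eight chunk bits
 *	vm_page_bits(512, 1024)    == 0x06	bits for chunks 1 and 2
 *
 * For the second case, first_bit = 512 >> 9 = 1 and last_bit =
 * 1535 >> 9 = 2, so the result is (2 << 2) - (1 << 1) = 8 - 2 = 6.
 */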
1701
1702/*
1703 *	vm_page_set_validclean:
1704 *
1705 *	Sets portions of a page valid and clean.  The arguments are expected
1706 *	to be DEV_BSIZE aligned but if they aren't the bitmap is inclusive
1707 *	of any partial chunks touched by the range.  The invalid portion of
1708 *	such chunks will be zero'd.
1709 *
1710 *	This routine may not block.
1711 *
1712 *	(base + size) must be less than or equal to PAGE_SIZE.
1713 */
1714void
1715vm_page_set_validclean(vm_page_t m, int base, int size)
1716{
1717	int pagebits;
1718	int frag;
1719	int endoff;
1720
1721	mtx_assert(&vm_page_queue_mtx, MA_OWNED);
1722	VM_OBJECT_LOCK_ASSERT(m->object, MA_OWNED);
1723	if (size == 0)	/* handle degenerate case */
1724		return;
1725
1726	/*
1727	 * If the base is not DEV_BSIZE aligned and the valid
1728	 * bit is clear, we have to zero out a portion of the
1729	 * first block.
1730	 */
1731	if ((frag = base & ~(DEV_BSIZE - 1)) != base &&
1732	    (m->valid & (1 << (base >> DEV_BSHIFT))) == 0)
1733		pmap_zero_page_area(m, frag, base - frag);
1734
1735	/*
1736	 * If the ending offset is not DEV_BSIZE aligned and the
1737	 * valid bit is clear, we have to zero out a portion of
1738	 * the last block.
1739	 */
1740	endoff = base + size;
1741	if ((frag = endoff & ~(DEV_BSIZE - 1)) != endoff &&
1742	    (m->valid & (1 << (endoff >> DEV_BSHIFT))) == 0)
1743		pmap_zero_page_area(m, endoff,
1744		    DEV_BSIZE - (endoff & (DEV_BSIZE - 1)));
1745
1746	/*
1747	 * Set valid, clear dirty bits.  If validating the entire
1748	 * page we can safely clear the pmap modify bit.  We also
1749	 * use this opportunity to clear the VPO_NOSYNC flag.  If a process
1750	 * takes a write fault on a MAP_NOSYNC memory area the flag will
1751	 * be set again.
1752	 *
1753	 * We set valid bits inclusive of any overlap, but we can only
1754	 * clear dirty bits for DEV_BSIZE chunks that are fully within
1755	 * the range.
1756	 */
1757	pagebits = vm_page_bits(base, size);
1758	m->valid |= pagebits;
1759#if 0	/* NOT YET */
1760	if ((frag = base & (DEV_BSIZE - 1)) != 0) {
1761		frag = DEV_BSIZE - frag;
1762		base += frag;
1763		size -= frag;
1764		if (size < 0)
1765			size = 0;
1766	}
1767	pagebits = vm_page_bits(base, size & (DEV_BSIZE - 1));
1768#endif
1769	m->dirty &= ~pagebits;
1770	if (base == 0 && size == PAGE_SIZE) {
1771		pmap_clear_modify(m);
1772		m->oflags &= ~VPO_NOSYNC;
1773	}
1774}
1775
1776void
1777vm_page_clear_dirty(vm_page_t m, int base, int size)
1778{
1779
1780	mtx_assert(&vm_page_queue_mtx, MA_OWNED);
1781	m->dirty &= ~vm_page_bits(base, size);
1782}
1783
1784/*
1785 *	vm_page_set_invalid:
1786 *
1787 *	Invalidates DEV_BSIZE'd chunks within a page.  Both the
1788 *	valid and dirty bits for the affected areas are cleared.
1789 *
1790 *	May not block.
1791 */
1792void
1793vm_page_set_invalid(vm_page_t m, int base, int size)
1794{
1795	int bits;
1796
1797	VM_OBJECT_LOCK_ASSERT(m->object, MA_OWNED);
1798	bits = vm_page_bits(base, size);
1799	mtx_assert(&vm_page_queue_mtx, MA_OWNED);
1800	if (m->valid == VM_PAGE_BITS_ALL && bits != 0)
1801		pmap_remove_all(m);
1802	m->valid &= ~bits;
1803	m->dirty &= ~bits;
1804	m->object->generation++;
1805}
1806
1807/*
1808 * vm_page_zero_invalid()
1809 *
1810 *	The kernel assumes that the invalid portions of a page contain
1811 *	garbage, but such pages can be mapped into memory by user code.
1812 *	When this occurs, we must zero out the non-valid portions of the
1813 *	page so user code sees what it expects.
1814 *
1815 *	Pages are most often semi-valid when the end of a file is mapped
1816 *	into memory and the file's size is not page aligned.
1817 */
1818void
1819vm_page_zero_invalid(vm_page_t m, boolean_t setvalid)
1820{
1821	int b;
1822	int i;
1823
1824	VM_OBJECT_LOCK_ASSERT(m->object, MA_OWNED);
1825	/*
1826	 * Scan the valid bits looking for invalid sections that
1827	 * must be zeroed.  Invalid sub-DEV_BSIZE'd areas (where the
1828	 * valid bit may be set) have already been zeroed by
1829	 * vm_page_set_validclean().
1830	 */
1831	for (b = i = 0; i <= PAGE_SIZE / DEV_BSIZE; ++i) {
1832		if (i == (PAGE_SIZE / DEV_BSIZE) ||
1833		    (m->valid & (1 << i))
1834		) {
1835			if (i > b) {
1836				pmap_zero_page_area(m,
1837				    b << DEV_BSHIFT, (i - b) << DEV_BSHIFT);
1838			}
1839			b = i + 1;
1840		}
1841	}
1842
1843	/*
1844	 * setvalid is TRUE when we can safely set the zero'd areas
1845	 * as being valid.  We can do this if there are no cache consistency
1846	 * issues.  E.g., it is OK to do with UFS, but not OK to do with NFS.
1847	 */
1848	if (setvalid)
1849		m->valid = VM_PAGE_BITS_ALL;
1850}
1851
1852/*
1853 *	vm_page_is_valid:
1854 *
1855 *	Is (partial) page valid?  Note that the case where size == 0
1856 *	will return FALSE in the degenerate case where the page is
1857 *	entirely invalid, and TRUE otherwise.
1858 *
1859 *	May not block.
1860 */
1861int
1862vm_page_is_valid(vm_page_t m, int base, int size)
1863{
1864	int bits = vm_page_bits(base, size);
1865
1866	VM_OBJECT_LOCK_ASSERT(m->object, MA_OWNED);
1867	if (m->valid && ((m->valid & bits) == bits))
1868		return (1);
1869	else
1870		return (0);
1871}
1872
1873/*
1874 * update dirty bits from pmap/mmu.  May not block.
1875 */
1876void
1877vm_page_test_dirty(vm_page_t m)
1878{
1879	if ((m->dirty != VM_PAGE_BITS_ALL) && pmap_is_modified(m)) {
1880		vm_page_dirty(m);
1881	}
1882}
1883
1884int so_zerocp_fullpage = 0;
1885
1886/*
1887 *	Replace the given page with a copy.  The copied page assumes
1888 *	the portion of the given page's "wire_count" that is not the
1889 *	responsibility of this copy-on-write mechanism.
1890 *
1891 *	The object containing the given page must have a non-zero
1892 *	paging-in-progress count and be locked.
1893 */
1894void
1895vm_page_cowfault(vm_page_t m)
1896{
1897	vm_page_t mnew;
1898	vm_object_t object;
1899	vm_pindex_t pindex;
1900
1901	object = m->object;
1902	VM_OBJECT_LOCK_ASSERT(object, MA_OWNED);
1903	KASSERT(object->paging_in_progress != 0,
1904	    ("vm_page_cowfault: object %p's paging-in-progress count is zero.",
1905	    object));
1906	pindex = m->pindex;
1907
1908 retry_alloc:
1909	pmap_remove_all(m);
1910	vm_page_remove(m);
1911	mnew = vm_page_alloc(object, pindex, VM_ALLOC_NORMAL | VM_ALLOC_NOBUSY);
1912	if (mnew == NULL) {
1913		vm_page_insert(m, object, pindex);
1914		vm_page_unlock_queues();
1915		VM_OBJECT_UNLOCK(object);
1916		VM_WAIT;
1917		VM_OBJECT_LOCK(object);
1918		if (m == vm_page_lookup(object, pindex)) {
1919			vm_page_lock_queues();
1920			goto retry_alloc;
1921		} else {
1922			/*
1923			 * Page disappeared during the wait.
1924			 */
1925			vm_page_lock_queues();
1926			return;
1927		}
1928	}
1929
1930	if (m->cow == 0) {
1931		/*
1932		 * Check to see if we raced with an xmit complete when
1933		 * waiting to allocate a page.  If so, put things back
1934		 * the way they were.
1935		 */
1936		vm_page_free(mnew);
1937		vm_page_insert(m, object, pindex);
1938	} else { /* clear COW & copy page */
1939		if (!so_zerocp_fullpage)
1940			pmap_copy_page(m, mnew);
1941		mnew->valid = VM_PAGE_BITS_ALL;
1942		vm_page_dirty(mnew);
1943		mnew->wire_count = m->wire_count - m->cow;
1944		m->wire_count = m->cow;
1945	}
1946}
1947
1948void
1949vm_page_cowclear(vm_page_t m)
1950{
1951
1952	mtx_assert(&vm_page_queue_mtx, MA_OWNED);
1953	if (m->cow) {
1954		m->cow--;
1955		/*
1956		 * Let vm_fault add back write permission lazily.
1957		 */
1958	}
1959	/*
1960	 *  sf_buf_free() will free the page, so we needn't do it here
1961	 */
1962}
1963
1964void
1965vm_page_cowsetup(vm_page_t m)
1966{
1967
1968	mtx_assert(&vm_page_queue_mtx, MA_OWNED);
1969	m->cow++;
1970	pmap_remove_write(m);
1971}
1972
1973#include "opt_ddb.h"
1974#ifdef DDB
1975#include <sys/kernel.h>
1976
1977#include <ddb/ddb.h>
1978
1979DB_SHOW_COMMAND(page, vm_page_print_page_info)
1980{
1981	db_printf("cnt.v_free_count: %d\n", cnt.v_free_count);
1982	db_printf("cnt.v_cache_count: %d\n", cnt.v_cache_count);
1983	db_printf("cnt.v_inactive_count: %d\n", cnt.v_inactive_count);
1984	db_printf("cnt.v_active_count: %d\n", cnt.v_active_count);
1985	db_printf("cnt.v_wire_count: %d\n", cnt.v_wire_count);
1986	db_printf("cnt.v_free_reserved: %d\n", cnt.v_free_reserved);
1987	db_printf("cnt.v_free_min: %d\n", cnt.v_free_min);
1988	db_printf("cnt.v_free_target: %d\n", cnt.v_free_target);
1989	db_printf("cnt.v_cache_min: %d\n", cnt.v_cache_min);
1990	db_printf("cnt.v_inactive_target: %d\n", cnt.v_inactive_target);
1991}
1992
1993DB_SHOW_COMMAND(pageq, vm_page_print_pageq_info)
1994{
1995
1996	db_printf("PQ_FREE:");
1997	db_printf(" %d", cnt.v_free_count);
1998	db_printf("\n");
1999
2000	db_printf("PQ_CACHE:");
2001	db_printf(" %d", cnt.v_cache_count);
2002	db_printf("\n");
2003
2004	db_printf("PQ_ACTIVE: %d, PQ_INACTIVE: %d\n",
2005		*vm_page_queues[PQ_ACTIVE].cnt,
2006		*vm_page_queues[PQ_INACTIVE].cnt);
2007}
2008#endif /* DDB */
2009