vm_page.c revision 92654
1/*
2 * Copyright (c) 1991 Regents of the University of California.
3 * All rights reserved.
4 *
5 * This code is derived from software contributed to Berkeley by
6 * The Mach Operating System project at Carnegie-Mellon University.
7 *
8 * Redistribution and use in source and binary forms, with or without
9 * modification, are permitted provided that the following conditions
10 * are met:
11 * 1. Redistributions of source code must retain the above copyright
12 *    notice, this list of conditions and the following disclaimer.
13 * 2. Redistributions in binary form must reproduce the above copyright
14 *    notice, this list of conditions and the following disclaimer in the
15 *    documentation and/or other materials provided with the distribution.
16 * 3. All advertising materials mentioning features or use of this software
17 *    must display the following acknowledgement:
18 *	This product includes software developed by the University of
19 *	California, Berkeley and its contributors.
20 * 4. Neither the name of the University nor the names of its contributors
21 *    may be used to endorse or promote products derived from this software
22 *    without specific prior written permission.
23 *
24 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
25 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
26 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
27 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
28 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
29 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
30 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
31 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
32 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
33 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
34 * SUCH DAMAGE.
35 *
36 *	from: @(#)vm_page.c	7.4 (Berkeley) 5/7/91
37 * $FreeBSD: head/sys/vm/vm_page.c 92654 2002-03-19 09:11:49Z jeff $
38 */
39
40/*
41 * Copyright (c) 1987, 1990 Carnegie-Mellon University.
42 * All rights reserved.
43 *
44 * Authors: Avadis Tevanian, Jr., Michael Wayne Young
45 *
46 * Permission to use, copy, modify and distribute this software and
47 * its documentation is hereby granted, provided that both the copyright
48 * notice and this permission notice appear in all copies of the
49 * software, derivative works or modified versions, and any portions
50 * thereof, and that both notices appear in supporting documentation.
51 *
52 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
53 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND
54 * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
55 *
56 * Carnegie Mellon requests users of this software to return to
57 *
58 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
59 *  School of Computer Science
60 *  Carnegie Mellon University
61 *  Pittsburgh PA 15213-3890
62 *
63 * any improvements or extensions that they make and grant Carnegie the
64 * rights to redistribute these changes.
65 */
66
67/*
68 *			GENERAL RULES ON VM_PAGE MANIPULATION
69 *
70 *	- a pageq mutex is required when adding or removing a page from a
71 *	  page queue (vm_page_queue[]), regardless of other mutexes or the
72 *	  busy state of a page.
73 *
74 *	- a hash chain mutex is required when associating or disassociating
75 *	  a page from the VM PAGE CACHE hash table (vm_page_buckets),
76 *	  regardless of other mutexes or the busy state of a page.
77 *
78 *	- either a hash chain mutex OR a busied page is required in order
79 *	  to modify the page flags.  A hash chain mutex must be obtained in
80 *	  order to busy a page.  A page's flags cannot be modified by a
81 *	  hash chain mutex if the page is marked busy.
82 *
83 *	- The object memq mutex is held when inserting or removing
84 *	  pages from an object (vm_page_insert() or vm_page_remove()).  This
85 *	  is different from the object's main mutex.
86 *
87 *	Generally speaking, you have to be aware of side effects when running
88 *	vm_page ops.  A vm_page_lookup() will return with the hash chain
89 *	locked, whether it was able to look up the page or not.  vm_page_free(),
90 *	vm_page_cache(), vm_page_activate(), and a number of other routines
91 *	will release the hash chain mutex for you.  Intermediate manipulation
92 *	routines such as vm_page_flag_set() expect the hash chain to be held
93 *	on entry and the hash chain will remain held on return.
94 *
95 *	pageq scanning can only occur with the pageq in question locked.
96 *	We have a known bottleneck with the active queue, but the cache
97 *	and free queues are actually arrays already.
98 */
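
/*
 * Illustrative sketch of the busy protocol described above (`object' and
 * `pindex' are assumed to exist, and Giant is assumed to be held, as it
 * is for the routines below):
 */
#if 0
	vm_page_t m;
	int s;

	s = splvm();
	m = vm_page_lookup(object, pindex);
	if (m != NULL && (m->flags & PG_BUSY) == 0 && m->busy == 0) {
		vm_page_busy(m);	/* set PG_BUSY; we now own the page */
		/* ... manipulate the page safely here ... */
		vm_page_wakeup(m);	/* clear PG_BUSY, wake any waiters */
	}
	splx(s);
#endif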
99
100/*
101 *	Resident memory management module.
102 */
103
104#include <sys/param.h>
105#include <sys/systm.h>
106#include <sys/lock.h>
107#include <sys/malloc.h>
108#include <sys/mutex.h>
109#include <sys/proc.h>
110#include <sys/vmmeter.h>
111#include <sys/vnode.h>
112
113#include <vm/vm.h>
114#include <vm/vm_param.h>
115#include <vm/vm_kern.h>
116#include <vm/vm_object.h>
117#include <vm/vm_page.h>
118#include <vm/vm_pageout.h>
119#include <vm/vm_pager.h>
120#include <vm/vm_extern.h>
121#include <vm/uma.h>
122#include <vm/uma_int.h>
123
124/*
125 *	Associated with each page of user-allocatable memory is a
126 *	page structure.
127 */
128static struct vm_page **vm_page_buckets; /* Array of buckets */
129static int vm_page_bucket_count;	/* How big is array? */
130static int vm_page_hash_mask;		/* Mask for hash function */
131static volatile int vm_page_bucket_generation;
132static struct mtx vm_buckets_mtx[BUCKET_HASH_SIZE];
133
134vm_page_t vm_page_array = 0;
135int vm_page_array_size = 0;
136long first_page = 0;
137int vm_page_zero_count = 0;
138
139/*
140 *	vm_set_page_size:
141 *
142 *	Sets the page size, perhaps based upon the memory
143 *	size.  Must be called before any use of page-size
144 *	dependent functions.
145 */
146void
147vm_set_page_size(void)
148{
149	if (cnt.v_page_size == 0)
150		cnt.v_page_size = PAGE_SIZE;
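	/*
	 * A power of two has exactly one bit set, so (x - 1) & x clears
	 * the lowest set bit and yields zero exactly when x has at most
	 * one bit set, i.e. when x is a power of two (or zero).
	 */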
151	if (((cnt.v_page_size - 1) & cnt.v_page_size) != 0)
152		panic("vm_set_page_size: page size not a power of two");
153}
154
155/*
156 *	vm_page_startup:
157 *
158 *	Initializes the resident memory module.
159 *
160 *	Allocates memory for the page cells, and
161 *	for the object/offset-to-page hash table headers.
162 *	Each page cell is initialized and placed on the free list.
163 */
164vm_offset_t
165vm_page_startup(vm_offset_t starta, vm_offset_t enda, vm_offset_t vaddr)
166{
167	vm_offset_t mapped;
168	struct vm_page **bucket;
169	vm_size_t npages, page_range;
170	vm_offset_t new_end;
171	int i;
172	vm_offset_t pa;
173	int nblocks;
174	vm_offset_t last_pa;
175
176	/* the biggest memory array is the second group of pages */
177	vm_offset_t end;
178	vm_offset_t biggestone, biggestsize;
179
180	vm_offset_t total;
181	vm_size_t bootpages;
182
183	total = 0;
184	biggestsize = 0;
185	biggestone = 0;
186	nblocks = 0;
187	vaddr = round_page(vaddr);
188
189	for (i = 0; phys_avail[i + 1]; i += 2) {
190		phys_avail[i] = round_page(phys_avail[i]);
191		phys_avail[i + 1] = trunc_page(phys_avail[i + 1]);
192	}
193
194	for (i = 0; phys_avail[i + 1]; i += 2) {
195		int size = phys_avail[i + 1] - phys_avail[i];
196
197		if (size > biggestsize) {
198			biggestone = i;
199			biggestsize = size;
200		}
201		++nblocks;
202		total += size;
203	}
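
	/*
	 * For example (hypothetical layout), a phys_avail[] of
	 * { 0x1000, 0x9f000, 0x100000, 0x3ffe0000, 0, 0 } describes two
	 * usable ranges; the loops above page-align each boundary and
	 * leave nblocks = 2, with biggestone indexing the larger second
	 * range.
	 */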
204
205	end = phys_avail[biggestone+1];
206
207	/*
208	 * Initialize the queue headers for the free queue, the active queue
209	 * and the inactive queue.
210	 */
211	vm_pageq_init();
212
213	/*
214	 * Allocate memory for use when bootstrapping the kernel memory allocator.
215	 */
216	bootpages = UMA_BOOT_PAGES * UMA_SLAB_SIZE;
217	new_end = end - bootpages;
218	new_end = trunc_page(new_end);
219	mapped = pmap_map(&vaddr, new_end, end,
220	    VM_PROT_READ | VM_PROT_WRITE);
221	bzero((caddr_t) mapped, end - new_end);
222	uma_startup((caddr_t)mapped);
223
224	end = new_end;
225
226	/*
227	 * Allocate (and initialize) the hash table buckets.
228	 *
229	 * The number of buckets MUST BE a power of 2, and the actual value is
230	 * the next power of 2 greater than the number of physical pages in
231	 * the system.
232	 *
233	 * We make the hash table approximately 2x the number of pages to
234	 * reduce the expected chain length.  With singly-linked chains it
235	 * occupies about the same space as the 1x hash table used before
236	 * the switch to TAILQ, but the chains are half as long on average.
237	 *
238	 * Note: This computation can be tweaked if desired.
239	 */
240	if (vm_page_bucket_count == 0) {
241		vm_page_bucket_count = 1;
242		while (vm_page_bucket_count < atop(total))
243			vm_page_bucket_count <<= 1;
244	}
245	vm_page_bucket_count <<= 1;
246	vm_page_hash_mask = vm_page_bucket_count - 1;
247
248	/*
249	 * Allocate and map the memory for the hash buckets.
250	 */
251	new_end = end - vm_page_bucket_count * sizeof(struct vm_page *);
252	new_end = trunc_page(new_end);
253	mapped = pmap_map(&vaddr, new_end, end,
254	    VM_PROT_READ | VM_PROT_WRITE);
255	bzero((caddr_t) mapped, end - new_end);
256
257	vm_page_buckets = (struct vm_page **)mapped;
258	bucket = vm_page_buckets;
259	for (i = 0; i < vm_page_bucket_count; i++) {
260		*bucket = NULL;
261		bucket++;
262	}
263	for (i = 0; i < BUCKET_HASH_SIZE; ++i)
264		mtx_init(&vm_buckets_mtx[i],  "vm buckets hash mutexes", MTX_DEF);
265
266	/*
267	 * Compute the number of pages of memory that will be available for
268	 * use (taking into account the overhead of a page structure per
269	 * page).
270	 */
271	first_page = phys_avail[0] / PAGE_SIZE;
272	page_range = phys_avail[(nblocks - 1) * 2 + 1] / PAGE_SIZE - first_page;
273	npages = (total - (page_range * sizeof(struct vm_page)) -
274	    (end - new_end)) / PAGE_SIZE;
275	end = new_end;
276
277	/*
278	 * Initialize the mem entry structures now, and put them in the free
279	 * queue.
280	 */
281	new_end = trunc_page(end - page_range * sizeof(struct vm_page));
282	mapped = pmap_map(&vaddr, new_end, end,
283	    VM_PROT_READ | VM_PROT_WRITE);
284	vm_page_array = (vm_page_t) mapped;
285
286	/*
287	 * Clear all of the page structures
288	 */
289	bzero((caddr_t) vm_page_array, page_range * sizeof(struct vm_page));
290	vm_page_array_size = page_range;
291
292	/*
293	 * Construct the free queue(s) in descending order (by physical
294	 * address) so that the first 16MB of physical memory is allocated
295	 * last rather than first.  On large-memory machines, this avoids
296	 * the exhaustion of low physical memory before isa_dmainit has run.
297	 */
298	cnt.v_page_count = 0;
299	cnt.v_free_count = 0;
300	for (i = 0; phys_avail[i + 1] && npages > 0; i += 2) {
301		pa = phys_avail[i];
302		if (i == biggestone)
303			last_pa = new_end;
304		else
305			last_pa = phys_avail[i + 1];
306		while (pa < last_pa && npages-- > 0) {
307			vm_pageq_add_new_page(pa);
308			pa += PAGE_SIZE;
309		}
310	}
311	return (vaddr);
312}
313
314/*
315 *	vm_page_hash:
316 *
317 *	Distributes the object/offset key pair among hash buckets.
318 *
319 *	NOTE:  This routine depends on vm_page_bucket_count being a power of 2.
320 *	This routine may not block.
321 *
322 *	We try to randomize the hash based on the object to spread the pages
323 *	out in the hash table without it costing us too much.
324 */
325static __inline int
326vm_page_hash(vm_object_t object, vm_pindex_t pindex)
327{
328	int i = ((uintptr_t)object + pindex) ^ object->hash_rand;
329
330	return (i & vm_page_hash_mask);
331}
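
/*
 * For example, with a (hypothetical) vm_page_bucket_count of 1024,
 * vm_page_hash_mask is 1023, so a page at pindex 5 of an object at
 * address 0xc0a00000 hashes to bucket ((0xc0a00000 + 5) ^ hash_rand)
 * & 1023, with hash_rand acting as a per-object salt.
 */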
332
333void
334vm_page_flag_set(vm_page_t m, unsigned short bits)
335{
336	GIANT_REQUIRED;
337	m->flags |= bits;
338}
339
340void
341vm_page_flag_clear(vm_page_t m, unsigned short bits)
342{
343	GIANT_REQUIRED;
344	m->flags &= ~bits;
345}
346
347void
348vm_page_busy(vm_page_t m)
349{
350	KASSERT((m->flags & PG_BUSY) == 0,
351	    ("vm_page_busy: page already busy!!!"));
352	vm_page_flag_set(m, PG_BUSY);
353}
354
355/*
356 *      vm_page_flash:
357 *
358 *      Wake up anyone waiting for the page.
359 */
360void
361vm_page_flash(vm_page_t m)
362{
363	if (m->flags & PG_WANTED) {
364		vm_page_flag_clear(m, PG_WANTED);
365		wakeup(m);
366	}
367}
368
369/*
370 *      vm_page_wakeup:
371 *
372 *      clear the PG_BUSY flag and wake up anyone waiting for the
373 *      page.
374 *
375 */
376void
377vm_page_wakeup(vm_page_t m)
378{
379	KASSERT(m->flags & PG_BUSY, ("vm_page_wakeup: page not busy!!!"));
380	vm_page_flag_clear(m, PG_BUSY);
381	vm_page_flash(m);
382}
383
384/*
385 *	vm_page_io_start / vm_page_io_finish:
386 *	Adjust the page's paging-I/O busy count (m->busy).
387 */
388void
389vm_page_io_start(vm_page_t m)
390{
391	GIANT_REQUIRED;
392	m->busy++;
393}
394
395void
396vm_page_io_finish(vm_page_t m)
397{
398	GIANT_REQUIRED;
399	m->busy--;
400	if (m->busy == 0)
401		vm_page_flash(m);
402}
403
404/*
405 * Keep the page from being freed by the page daemon.  This has much
406 * the same effect as wiring, but with much lower overhead, and it
407 * should be used only for *very* temporary holds (a lightweight
408 * form of "wiring").
409 */
410void
411vm_page_hold(vm_page_t mem)
412{
413	GIANT_REQUIRED;
414	mem->hold_count++;
415}
416
417void
418vm_page_unhold(vm_page_t mem)
419{
420	GIANT_REQUIRED;
421	--mem->hold_count;
422	KASSERT(mem->hold_count >= 0, ("vm_page_unhold: hold count < 0!!!"));
423	if (mem->hold_count == 0 && mem->queue == PQ_HOLD)
424		vm_page_free_toq(mem);
425}
426
427/*
428 *	vm_page_protect:
429 *
430 *	Reduce the protection of a page.  This routine never raises the
431 *	protection and therefore can be safely called if the page is already
432 *	at VM_PROT_NONE (it is then effectively a NOP).
433 */
434void
435vm_page_protect(vm_page_t mem, int prot)
436{
437	if (prot == VM_PROT_NONE) {
438		if (mem->flags & (PG_WRITEABLE|PG_MAPPED)) {
439			pmap_page_protect(mem, VM_PROT_NONE);
440			vm_page_flag_clear(mem, PG_WRITEABLE|PG_MAPPED);
441		}
442	} else if ((prot == VM_PROT_READ) && (mem->flags & PG_WRITEABLE)) {
443		pmap_page_protect(mem, VM_PROT_READ);
444		vm_page_flag_clear(mem, PG_WRITEABLE);
445	}
446}
447/*
448 *	vm_page_zero_fill:
449 *
450 *	Zero-fill the specified page.
451 *	Written as a standard pagein routine, to
452 *	be used by the zero-fill object.
453 */
454boolean_t
455vm_page_zero_fill(vm_page_t m)
456{
457	pmap_zero_page(VM_PAGE_TO_PHYS(m));
458	return (TRUE);
459}
460
461/*
462 *	vm_page_copy:
463 *
464 *	Copy one page to another
465 */
466void
467vm_page_copy(vm_page_t src_m, vm_page_t dest_m)
468{
469	pmap_copy_page(VM_PAGE_TO_PHYS(src_m), VM_PAGE_TO_PHYS(dest_m));
470	dest_m->valid = VM_PAGE_BITS_ALL;
471}
472
473/*
474 *	vm_page_free:
475 *
476 *	Free a page
477 *
478 *	The clearing of PG_ZERO is a temporary safety until the code can be
479 *	reviewed to determine that PG_ZERO is being properly cleared on
480 *	write faults or maps.  PG_ZERO was previously cleared in
481 *	vm_page_alloc().
482 */
483void
484vm_page_free(vm_page_t m)
485{
486	vm_page_flag_clear(m, PG_ZERO);
487	vm_page_free_toq(m);
488	vm_page_zero_idle_wakeup();
489}
490
491/*
492 *	vm_page_free_zero:
493 *
494 *	Free a page to the zeroed-pages queue
495 */
496void
497vm_page_free_zero(vm_page_t m)
498{
499	vm_page_flag_set(m, PG_ZERO);
500	vm_page_free_toq(m);
501}
502
503/*
504 *	vm_page_sleep_busy:
505 *
506 *	Wait until page is no longer PG_BUSY or (if also_m_busy is TRUE)
507 *	m->busy is zero.  Returns TRUE if it had to sleep ( including if
508 *	it almost had to sleep and made temporary spl*() mods), FALSE
509 *	otherwise.
510 *
511 *	This routine assumes that interrupts can only remove the busy
512 *	status from a page, not set the busy status or change it from
513 *	PG_BUSY to m->busy or vice versa (which would create a timing
514 *	window).
515 */
516int
517vm_page_sleep_busy(vm_page_t m, int also_m_busy, const char *msg)
518{
519	GIANT_REQUIRED;
520	if ((m->flags & PG_BUSY) || (also_m_busy && m->busy))  {
521		int s = splvm();
522		if ((m->flags & PG_BUSY) || (also_m_busy && m->busy)) {
523			/*
524			 * Page is busy. Wait and retry.
525			 */
526			vm_page_flag_set(m, PG_WANTED | PG_REFERENCED);
527			tsleep(m, PVM, msg, 0);
528		}
529		splx(s);
530		return (TRUE);
531		/* not reached */
532	}
533	return (FALSE);
534}
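
/*
 * A minimal usage sketch (illustrative only; `m', `object' and `pindex'
 * are assumed): the lookup must be retried after any sleep, since the
 * page may have been freed or renamed while we were blocked.
 */
#if 0
retry:
	m = vm_page_lookup(object, pindex);
	if (m != NULL && vm_page_sleep_busy(m, TRUE, "pgwait"))
		goto retry;
#endif
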
535/*
536 *	vm_page_dirty:
537 *
538 *	Make the entire page dirty.
539 */
540void
541vm_page_dirty(vm_page_t m)
542{
543	KASSERT(m->queue - m->pc != PQ_CACHE,
544	    ("vm_page_dirty: page in cache!"));
545	m->dirty = VM_PAGE_BITS_ALL;
546}
547
548/*
549 *	vm_page_undirty:
550 *
551 *	Set page to not be dirty.  Note: does not clear the pmap modify bits.
552 */
553void
554vm_page_undirty(vm_page_t m)
555{
556	m->dirty = 0;
557}
558
559/*
560 *	vm_page_insert:		[ internal use only ]
561 *
562 *	Inserts the given mem entry into the object and object list.
563 *
564 *	The pagetables are not updated but will presumably fault the page
565 *	in if necessary, or if a kernel page the caller will at some point
566 *	enter the page into the kernel's pmap.  We are not allowed to block
567 *	here so we *can't* do this anyway.
568 *
569 *	The object and page must be locked, and the caller must be at splhigh.
570 *	This routine may not block.
571 */
572void
573vm_page_insert(vm_page_t m, vm_object_t object, vm_pindex_t pindex)
574{
575	struct vm_page **bucket;
576
577	GIANT_REQUIRED;
578
579	if (m->object != NULL)
580		panic("vm_page_insert: already inserted");
581
582	/*
583	 * Record the object/offset pair in this page
584	 */
585	m->object = object;
586	m->pindex = pindex;
587
588	/*
589	 * Insert it into the object/offset hash table
590	 */
591	bucket = &vm_page_buckets[vm_page_hash(object, pindex)];
592	m->hnext = *bucket;
593	*bucket = m;
594	vm_page_bucket_generation++;
595
596	/*
597	 * Now link into the object's list of backed pages.
598	 */
599	TAILQ_INSERT_TAIL(&object->memq, m, listq);
600	object->generation++;
601
602	/*
603	 * show that the object has one more resident page.
604	 */
605	object->resident_page_count++;
606
607	/*
608	 * Since we are inserting a new and possibly dirty page,
609	 * update the object's OBJ_WRITEABLE and OBJ_MIGHTBEDIRTY flags.
610	 */
611	if (m->flags & PG_WRITEABLE)
612		vm_object_set_writeable_dirty(object);
613}
614
615/*
616 *	vm_page_remove:
617 *				NOTE: used by device pager as well -wfj
618 *
619 *	Removes the given mem entry from the object/offset-page
620 *	table and the object page list, but does not invalidate/terminate
621 *	the backing store.
622 *
623 *	The object and page must be locked, and at splhigh.
624 *	The underlying pmap entry (if any) is NOT removed here.
625 *	This routine may not block.
626 */
627void
628vm_page_remove(vm_page_t m)
629{
630	vm_object_t object;
631
632	GIANT_REQUIRED;
633
634	if (m->object == NULL)
635		return;
636
637	if ((m->flags & PG_BUSY) == 0) {
638		panic("vm_page_remove: page not busy");
639	}
640
641	/*
642	 * Basically destroy the page.
643	 */
644	vm_page_wakeup(m);
645
646	object = m->object;
647
648	/*
649	 * Remove from the object/offset hash table.  The page
650	 * must be on the hash chain; we will panic if it isn't.
651	 *
652	 * Note: we must NULL-out m->hnext to prevent loops in detached
653	 * buffers with vm_page_lookup().
654	 */
655	{
656		struct vm_page **bucket;
657
658		bucket = &vm_page_buckets[vm_page_hash(m->object, m->pindex)];
659		while (*bucket != m) {
660			if (*bucket == NULL)
661				panic("vm_page_remove(): page not found in hash");
662			bucket = &(*bucket)->hnext;
663		}
664		*bucket = m->hnext;
665		m->hnext = NULL;
666		vm_page_bucket_generation++;
667	}
668
669	/*
670	 * Now remove from the object's list of backed pages.
671	 */
672	TAILQ_REMOVE(&object->memq, m, listq);
673
674	/*
675	 * And show that the object has one fewer resident page.
676	 */
677	object->resident_page_count--;
678	object->generation++;
679
680	m->object = NULL;
681}
682
683/*
684 *	vm_page_lookup:
685 *
686 *	Returns the page associated with the object/offset
687 *	pair specified; if none is found, NULL is returned.
688 *
689 *	NOTE: the code below does not lock.  It will operate properly if
690 *	an interrupt makes a change, but the generation algorithm will not
691 *	operate properly in an SMP environment where multiple CPUs are able to run
692 *	kernel code simultaneously.
693 *
694 *	The object must be locked.  No side effects.
695 *	This routine may not block.
696 *	This is a critical path routine
697 */
698vm_page_t
699vm_page_lookup(vm_object_t object, vm_pindex_t pindex)
700{
701	vm_page_t m;
702	struct vm_page **bucket;
703	int generation;
704
705	/*
706	 * Search the hash table for this object/offset pair
707	 */
708retry:
709	generation = vm_page_bucket_generation;
710	bucket = &vm_page_buckets[vm_page_hash(object, pindex)];
711	for (m = *bucket; m != NULL; m = m->hnext) {
712		if ((m->object == object) && (m->pindex == pindex)) {
713			if (vm_page_bucket_generation != generation)
714				goto retry;
715			return (m);
716		}
717	}
718	if (vm_page_bucket_generation != generation)
719		goto retry;
720	return (NULL);
721}
722
723/*
724 *	vm_page_rename:
725 *
726 *	Move the given memory entry from its
727 *	current object to the specified target object/offset.
728 *
729 *	The object must be locked.
730 *	This routine may not block.
731 *
732 *	Note: this routine will raise itself to splvm(), the caller need not.
733 *
734 *	Note: swap associated with the page must be invalidated by the move.  We
735 *	      have to do this for several reasons:  (1) we aren't freeing the
736 *	      page, (2) we are dirtying the page, (3) the VM system is probably
737 *	      moving the page from object A to B, and will then later move
738 *	      the backing store from A to B and we can't have a conflict.
739 *
740 *	Note: we *always* dirty the page.  It is necessary both for the
741 *	      fact that we moved it, and because we may be invalidating
742 *	      swap.  If the page is on the cache, we have to deactivate it
743 *	      or vm_page_dirty() will panic.  Dirty pages are not allowed
744 *	      in the cache.
745 */
746void
747vm_page_rename(vm_page_t m, vm_object_t new_object, vm_pindex_t new_pindex)
748{
749	int s;
750
751	s = splvm();
752	vm_page_remove(m);
753	vm_page_insert(m, new_object, new_pindex);
754	if (m->queue - m->pc == PQ_CACHE)
755		vm_page_deactivate(m);
756	vm_page_dirty(m);
757	splx(s);
758}
759
760/*
761 *	vm_page_select_cache:
762 *
763 *	Find a page on the cache queue with color optimization.  Pages
764 *	that are found but unusable (busy, held, or wired) are deactivated.
765 *	This keeps us from using potentially busy cached pages.
766 *
767 *	This routine must be called at splvm().
768 *	This routine may not block.
769 */
770static vm_page_t
771vm_page_select_cache(vm_object_t object, vm_pindex_t pindex)
772{
773	vm_page_t m;
774
775	GIANT_REQUIRED;
776	while (TRUE) {
777		m = vm_pageq_find(
778		    PQ_CACHE,
779		    (pindex + object->pg_color) & PQ_L2_MASK,
780		    FALSE
781		);
782		if (m && ((m->flags & (PG_BUSY|PG_UNMANAGED)) || m->busy ||
783			       m->hold_count || m->wire_count)) {
784			vm_page_deactivate(m);
785			continue;
786		}
787		return m;
788	}
789}
790
791/*
792 *	vm_page_select_free:
793 *
794 *	Find a free or zero page, with specified preference.
795 *
796 *	This routine must be called at splvm().
797 *	This routine may not block.
798 */
799static __inline vm_page_t
800vm_page_select_free(vm_object_t object, vm_pindex_t pindex, boolean_t prefer_zero)
801{
802	vm_page_t m;
803
804	m = vm_pageq_find(
805		PQ_FREE,
806		(pindex + object->pg_color) & PQ_L2_MASK,
807		prefer_zero
808	);
809	return (m);
810}
811
812/*
813 *	vm_page_alloc:
814 *
815 *	Allocate and return a memory cell associated
816 *	with this VM object/offset pair.
817 *
818 *	page_req classes:
819 *	VM_ALLOC_NORMAL		normal process request
820 *	VM_ALLOC_SYSTEM		system *really* needs a page
821 *	VM_ALLOC_INTERRUPT	interrupt time request
822 *	VM_ALLOC_ZERO		zero page
823 *
824 *	This routine may not block.
825 *
826 *	Additional special handling is required when called from an
827 *	interrupt (VM_ALLOC_INTERRUPT).  We are not allowed to mess with
828 *	the page cache in this case.
829 */
830vm_page_t
831vm_page_alloc(vm_object_t object, vm_pindex_t pindex, int page_req)
832{
833	vm_page_t m = NULL;
834	int s;
835
836	GIANT_REQUIRED;
837
838	KASSERT(!vm_page_lookup(object, pindex),
839		("vm_page_alloc: page already allocated"));
840
841	/*
842	 * The pager is allowed to eat deeper into the free page list.
843	 */
844	if ((curproc == pageproc) && (page_req != VM_ALLOC_INTERRUPT)) {
845		page_req = VM_ALLOC_SYSTEM;
846	}
847
848	s = splvm();
849
850loop:
851	if (cnt.v_free_count > cnt.v_free_reserved) {
852		/*
853		 * Allocate from the free queue if there are plenty of pages
854		 * in it.
855		 */
856		if (page_req == VM_ALLOC_ZERO)
857			m = vm_page_select_free(object, pindex, TRUE);
858		else
859			m = vm_page_select_free(object, pindex, FALSE);
860	} else if (
861	    (page_req == VM_ALLOC_SYSTEM &&
862	     cnt.v_cache_count == 0 &&
863	     cnt.v_free_count > cnt.v_interrupt_free_min) ||
864	    (page_req == VM_ALLOC_INTERRUPT && cnt.v_free_count > 0)
865	) {
866		/*
867		 * Interrupt or system, dig deeper into the free list.
868		 */
869		m = vm_page_select_free(object, pindex, FALSE);
870	} else if (page_req != VM_ALLOC_INTERRUPT) {
871		/*
872		 * Allocatable from cache (non-interrupt only).  On success,
873		 * we must free the page and try again, thus ensuring that
874		 * cnt.v_*_free_min counters are replenished.
875		 */
876		m = vm_page_select_cache(object, pindex);
877		if (m == NULL) {
878			splx(s);
879#if defined(DIAGNOSTIC)
880			if (cnt.v_cache_count > 0)
881				printf("vm_page_alloc(NORMAL): missing pages on cache queue: %d\n", cnt.v_cache_count);
882#endif
883			vm_pageout_deficit++;
884			pagedaemon_wakeup();
885			return (NULL);
886		}
887		KASSERT(m->dirty == 0, ("Found dirty cache page %p", m));
888		vm_page_busy(m);
889		vm_page_protect(m, VM_PROT_NONE);
890		vm_page_free(m);
891		goto loop;
892	} else {
893		/*
894		 * Not allocatable from the cache at interrupt time; give up.
895		 */
896		splx(s);
897		vm_pageout_deficit++;
898		pagedaemon_wakeup();
899		return (NULL);
900	}
901
902	/*
903	 *  At this point we had better have found a good page.
904	 */
905
906	KASSERT(
907	    m != NULL,
908	    ("vm_page_alloc(): missing page on free queue\n")
909	);
910
911	/*
912	 * Remove from free queue
913	 */
914
915	vm_pageq_remove_nowakeup(m);
916
917	/*
918	 * Initialize structure.  Only the PG_ZERO flag is inherited.
919	 */
920	if (m->flags & PG_ZERO) {
921		vm_page_zero_count--;
922		m->flags = PG_ZERO | PG_BUSY;
923	} else {
924		m->flags = PG_BUSY;
925	}
926	m->wire_count = 0;
927	m->hold_count = 0;
928	m->act_count = 0;
929	m->busy = 0;
930	m->valid = 0;
931	KASSERT(m->dirty == 0, ("vm_page_alloc: free/cache page %p was dirty", m));
932
933	/*
934	 * vm_page_insert() is safe prior to the splx().  Note also that
935	 * inserting a page here does not insert it into the pmap (which
936	 * could cause us to block allocating memory).  We cannot block
937	 * anywhere.
938	 */
939	vm_page_insert(m, object, pindex);
940
941	/*
942	 * Don't wake the pageout daemon too often - only wake it when
943	 * we would be nearly out of memory.
944	 */
945	if (vm_paging_needed())
946		pagedaemon_wakeup();
947
948	splx(s);
949	return (m);
950}
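
/*
 * A minimal allocation sketch (illustrative only; `m', `object' and
 * `pindex' are assumed).  VM_ALLOC_ZERO is only a preference, so
 * PG_ZERO must be checked before relying on a zeroed page, and a real
 * caller must cope with the page appearing while it slept in VM_WAIT
 * (see vm_page_grab() for the full pattern).
 */
#if 0
	while ((m = vm_page_alloc(object, pindex, VM_ALLOC_ZERO)) == NULL)
		VM_WAIT;
	if ((m->flags & PG_ZERO) == 0)
		pmap_zero_page(VM_PAGE_TO_PHYS(m));
#endif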
951
952/*
953 *	vm_wait:	(also see VM_WAIT macro)
954 *
955 *	Block until free pages are available for allocation
956 *	- Called in various places before memory allocations.
957 */
958void
959vm_wait(void)
960{
961	int s;
962
963	s = splvm();
964	if (curproc == pageproc) {
965		vm_pageout_pages_needed = 1;
966		tsleep(&vm_pageout_pages_needed, PSWP, "VMWait", 0);
967	} else {
968		if (!vm_pages_needed) {
969			vm_pages_needed = 1;
970			wakeup(&vm_pages_needed);
971		}
972		tsleep(&cnt.v_free_count, PVM, "vmwait", 0);
973	}
974	splx(s);
975}
976
977/*
978 *	vm_waitpfault:	(also see VM_WAITPFAULT macro)
979 *
980 *	Block until free pages are available for allocation
981 *	- Called only in vm_fault so that processes page faulting
982 *	  can be easily tracked.
983 *	- Sleeps at a lower priority than vm_wait() so that vm_wait()ing
984 *	  processes will be able to grab memory first.  Do not change
985 *	  this balance without careful testing first.
986 */
987void
988vm_waitpfault(void)
989{
990	int s;
991
992	s = splvm();
993	if (!vm_pages_needed) {
994		vm_pages_needed = 1;
995		wakeup(&vm_pages_needed);
996	}
997	tsleep(&cnt.v_free_count, PUSER, "pfault", 0);
998	splx(s);
999}
1000
1001/*
1002 *	vm_page_activate:
1003 *
1004 *	Put the specified page on the active list (if appropriate).
1005 *	Ensure that act_count is at least ACT_INIT but do not otherwise
1006 *	mess with it.
1007 *
1008 *	The page queues must be locked.
1009 *	This routine may not block.
1010 */
1011void
1012vm_page_activate(vm_page_t m)
1013{
1014	int s;
1015
1016	GIANT_REQUIRED;
1017	s = splvm();
1018	if (m->queue != PQ_ACTIVE) {
1019		if ((m->queue - m->pc) == PQ_CACHE)
1020			cnt.v_reactivated++;
1021		vm_pageq_remove(m);
1022		if (m->wire_count == 0 && (m->flags & PG_UNMANAGED) == 0) {
1023			if (m->act_count < ACT_INIT)
1024				m->act_count = ACT_INIT;
1025			vm_pageq_enqueue(PQ_ACTIVE, m);
1026		}
1027	} else {
1028		if (m->act_count < ACT_INIT)
1029			m->act_count = ACT_INIT;
1030	}
1031	splx(s);
1032}
1033
1034/*
1035 *	vm_page_free_wakeup:
1036 *
1037 *	Helper routine for vm_page_free_toq() and vm_page_cache().  This
1038 *	routine is called when a page has been added to the cache or free
1039 *	queues.
1040 *
1041 *	This routine may not block.
1042 *	This routine must be called at splvm()
1043 */
1044static __inline void
1045vm_page_free_wakeup(void)
1046{
1047	/*
1048	 * If the pageout daemon needs pages, tell it that there are
1049	 * some free.
1050	 */
1051	if (vm_pageout_pages_needed &&
1052	    cnt.v_cache_count + cnt.v_free_count >= cnt.v_pageout_free_min) {
1053		wakeup(&vm_pageout_pages_needed);
1054		vm_pageout_pages_needed = 0;
1055	}
1056	/*
1057	 * Wake up processes that are waiting on memory if we hit a
1058	 * high water mark, and wake up the scheduler process if we
1059	 * have lots of memory; it will swap in processes.
1060	 */
1061	if (vm_pages_needed && !vm_page_count_min()) {
1062		vm_pages_needed = 0;
1063		wakeup(&cnt.v_free_count);
1064	}
1065}
1066
1067/*
1068 *	vm_page_free_toq:
1069 *
1070 *	Returns the given page to the PQ_FREE list,
1071 *	disassociating it from any VM object.
1072 *
1073 *	Object and page must be locked prior to entry.
1074 *	This routine may not block.
1075 */
1076
1077void
1078vm_page_free_toq(vm_page_t m)
1079{
1080	int s;
1081	struct vpgqueues *pq;
1082	vm_object_t object = m->object;
1083
1084	GIANT_REQUIRED;
1085	s = splvm();
1086	cnt.v_tfree++;
1087
1088	if (m->busy || ((m->queue - m->pc) == PQ_FREE)) {
1089		printf(
1090		"vm_page_free: pindex(%lu), busy(%d), PG_BUSY(%d), hold(%d)\n",
1091		    (u_long)m->pindex, m->busy, (m->flags & PG_BUSY) ? 1 : 0,
1092		    m->hold_count);
1093		if ((m->queue - m->pc) == PQ_FREE)
1094			panic("vm_page_free: freeing free page");
1095		else
1096			panic("vm_page_free: freeing busy page");
1097	}
1098
1099	/*
1100	 * unqueue, then remove page.  Note that we cannot destroy
1101	 * the page here because we do not want to call the pager's
1102	 * callback routine until after we've put the page on the
1103	 * appropriate free queue.
1104	 */
1105	vm_pageq_remove_nowakeup(m);
1106	vm_page_remove(m);
1107
1108	/*
1109	 * If the page is fictitious, remove the object association
1110	 * and return; otherwise delay the object association removal.
1111	 */
1112	if ((m->flags & PG_FICTITIOUS) != 0) {
1113		splx(s);
1114		return;
1115	}
1116
1117	m->valid = 0;
1118	vm_page_undirty(m);
1119
1120	if (m->wire_count != 0) {
1121		if (m->wire_count > 1) {
1122			panic("vm_page_free: invalid wire count (%d), pindex: 0x%lx",
1123				m->wire_count, (long)m->pindex);
1124		}
1125		panic("vm_page_free: freeing wired page\n");
1126	}
1127
1128	/*
1129	 * If we've exhausted the object's resident pages we want to free
1130	 * it up.
1131	 */
1132	if (object &&
1133	    (object->type == OBJT_VNODE) &&
1134	    ((object->flags & OBJ_DEAD) == 0)
1135	) {
1136		struct vnode *vp = (struct vnode *)object->handle;
1137
1138		if (vp && VSHOULDFREE(vp))
1139			vfree(vp);
1140	}
1141
1142	/*
1143	 * Clear the UNMANAGED flag when freeing an unmanaged page.
1144	 */
1145	if (m->flags & PG_UNMANAGED) {
1146		m->flags &= ~PG_UNMANAGED;
1147	} else {
1148#ifdef __alpha__
1149		pmap_page_is_free(m);
1150#endif
1151	}
1152
1153	if (m->hold_count != 0) {
1154		m->flags &= ~PG_ZERO;
1155		m->queue = PQ_HOLD;
1156	} else
1157		m->queue = PQ_FREE + m->pc;
1158	pq = &vm_page_queues[m->queue];
1159	pq->lcnt++;
1160	++(*pq->cnt);
1161
1162	/*
1163	 * Put zero'd pages on the end ( where we look for zero'd pages
1164	 * first ) and non-zeroed pages at the head.
1165	 */
1166	if (m->flags & PG_ZERO) {
1167		TAILQ_INSERT_TAIL(&pq->pl, m, pageq);
1168		++vm_page_zero_count;
1169	} else {
1170		TAILQ_INSERT_HEAD(&pq->pl, m, pageq);
1171	}
1172	vm_page_free_wakeup();
1173	splx(s);
1174}
1175
1176/*
1177 *	vm_page_unmanage:
1178 *
1179 * 	Prevent PV management from being done on the page.  The page is
1180 *	removed from the paging queues as if it were wired, and as a
1181 *	consequence of no longer being managed the pageout daemon will not
1182 *	touch it (since there is no way to locate the pte mappings for the
1183 *	page).  madvise() calls that mess with the pmap will also no longer
1184 *	operate on the page.
1185 *
1186 *	Beyond that the page is still reasonably 'normal'.  Freeing the page
1187 *	will clear the flag.
1188 *
1189 *	This routine is used by OBJT_PHYS objects - objects using unswappable
1190 *	physical memory as backing store rather than swap-backed memory and
1191 *	will eventually be extended to support 4MB unmanaged physical
1192 *	mappings.
1193 */
1194void
1195vm_page_unmanage(vm_page_t m)
1196{
1197	int s;
1198
1199	s = splvm();
1200	if ((m->flags & PG_UNMANAGED) == 0) {
1201		if (m->wire_count == 0)
1202			vm_pageq_remove(m);
1203	}
1204	vm_page_flag_set(m, PG_UNMANAGED);
1205	splx(s);
1206}
1207
1208/*
1209 *	vm_page_wire:
1210 *
1211 *	Mark this page as wired down by yet
1212 *	another map, removing it from paging queues
1213 *	as necessary.
1214 *
1215 *	The page queues must be locked.
1216 *	This routine may not block.
1217 */
1218void
1219vm_page_wire(vm_page_t m)
1220{
1221	int s;
1222
1223	/*
1224	 * Only bump the wire statistics if the page is not already wired,
1225	 * and only unqueue the page if it is on some queue (if it is unmanaged
1226	 * it is already off the queues).
1227	 */
1228	s = splvm();
1229	if (m->wire_count == 0) {
1230		if ((m->flags & PG_UNMANAGED) == 0)
1231			vm_pageq_remove(m);
1232		cnt.v_wire_count++;
1233	}
1234	m->wire_count++;
1235	KASSERT(m->wire_count != 0, ("vm_page_wire: wire_count overflow m=%p", m));
1236	splx(s);
1237	vm_page_flag_set(m, PG_MAPPED);
1238}
1239
1240/*
1241 *	vm_page_unwire:
1242 *
1243 *	Release one wiring of this page, potentially
1244 *	enabling it to be paged again.
1245 *
1246 *	Many pages placed on the inactive queue should actually go
1247 *	into the cache, but it is difficult to figure out which.  What
1248 *	we do instead, if the inactive target is well met, is to put
1249 *	clean pages at the head of the inactive queue instead of the tail.
1250 *	This will cause them to be moved to the cache more quickly and
1251 *	if not actively re-referenced, freed more quickly.  If we just
1252 *	stick these pages at the end of the inactive queue, heavy filesystem
1253 *	meta-data accesses can cause an unnecessary paging load on memory bound
1254 *	processes.  This optimization causes one-time-use metadata to be
1255 *	reused more quickly.
1256 *
1257 *	BUT, if we are in a low-memory situation we have no choice but to
1258 *	put clean pages on the cache queue.
1259 *
1260 *	A number of routines use vm_page_unwire() to guarantee that the page
1261 *	will go into either the inactive or active queues, and will NEVER
1262 *	be placed in the cache - for example, just after dirtying a page.
1263 *	dirty pages in the cache are not allowed.
1264 *
1265 *	The page queues must be locked.
1266 *	This routine may not block.
1267 */
1268void
1269vm_page_unwire(vm_page_t m, int activate)
1270{
1271	int s;
1272
1273	s = splvm();
1274
1275	if (m->wire_count > 0) {
1276		m->wire_count--;
1277		if (m->wire_count == 0) {
1278			cnt.v_wire_count--;
1279			if (m->flags & PG_UNMANAGED) {
1280				;
1281			} else if (activate)
1282				vm_pageq_enqueue(PQ_ACTIVE, m);
1283			else {
1284				vm_page_flag_clear(m, PG_WINATCFLS);
1285				vm_pageq_enqueue(PQ_INACTIVE, m);
1286			}
1287		}
1288	} else {
1289		panic("vm_page_unwire: invalid wire count: %d\n", m->wire_count);
1290	}
1291	splx(s);
1292}
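
/*
 * Typical pairing (illustrative only; `m' is assumed): wire a page
 * across an operation that must not lose it, then unwire with
 * activate == 0 so a clean, unreferenced page drifts toward reuse.
 */
#if 0
	vm_page_wire(m);
	/* ... the page cannot be freed or paged out here ... */
	vm_page_unwire(m, 0);	/* requeue on the inactive queue */
#endif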
1293
1294
1295/*
1296 * Move the specified page to the inactive queue.  If the page has
1297 * any associated swap, the swap is deallocated.
1298 *
1299 * Normally athead is 0 resulting in LRU operation.  athead is set
1300 * to 1 if we want this page to be 'as if it were placed in the cache',
1301 * except without unmapping it from the process address space.
1302 *
1303 * This routine may not block.
1304 */
1305static __inline void
1306_vm_page_deactivate(vm_page_t m, int athead)
1307{
1308	int s;
1309
1310	GIANT_REQUIRED;
1311	/*
1312	 * Ignore if already inactive.
1313	 */
1314	if (m->queue == PQ_INACTIVE)
1315		return;
1316
1317	s = splvm();
1318	if (m->wire_count == 0 && (m->flags & PG_UNMANAGED) == 0) {
1319		if ((m->queue - m->pc) == PQ_CACHE)
1320			cnt.v_reactivated++;
1321		vm_page_flag_clear(m, PG_WINATCFLS);
1322		vm_pageq_remove(m);
1323		if (athead)
1324			TAILQ_INSERT_HEAD(&vm_page_queues[PQ_INACTIVE].pl, m, pageq);
1325		else
1326			TAILQ_INSERT_TAIL(&vm_page_queues[PQ_INACTIVE].pl, m, pageq);
1327		m->queue = PQ_INACTIVE;
1328		vm_page_queues[PQ_INACTIVE].lcnt++;
1329		cnt.v_inactive_count++;
1330	}
1331	splx(s);
1332}
1333
1334void
1335vm_page_deactivate(vm_page_t m)
1336{
1337	_vm_page_deactivate(m, 0);
1338}
1339
1340/*
1341 * vm_page_try_to_cache:
1342 *
1343 * Returns 0 on failure, 1 on success
1344 */
1345int
1346vm_page_try_to_cache(vm_page_t m)
1347{
1348	GIANT_REQUIRED;
1349
1350	if (m->dirty || m->hold_count || m->busy || m->wire_count ||
1351	    (m->flags & (PG_BUSY|PG_UNMANAGED))) {
1352		return (0);
1353	}
1354	vm_page_test_dirty(m);
1355	if (m->dirty)
1356		return (0);
1357	vm_page_cache(m);
1358	return (1);
1359}
1360
1361/*
1362 * vm_page_try_to_free()
1363 *
1364 *	Attempt to free the page.  If we cannot free it, we do nothing.
1365 *	1 is returned on success, 0 on failure.
1366 */
1367int
1368vm_page_try_to_free(vm_page_t m)
1369{
1370	if (m->dirty || m->hold_count || m->busy || m->wire_count ||
1371	    (m->flags & (PG_BUSY|PG_UNMANAGED))) {
1372		return (0);
1373	}
1374	vm_page_test_dirty(m);
1375	if (m->dirty)
1376		return (0);
1377	vm_page_busy(m);
1378	vm_page_protect(m, VM_PROT_NONE);
1379	vm_page_free(m);
1380	return (1);
1381}
1382
1383/*
1384 * vm_page_cache
1385 *
1386 * Put the specified page onto the page cache queue (if appropriate).
1387 *
1388 * This routine may not block.
1389 */
1390void
1391vm_page_cache(vm_page_t m)
1392{
1393	int s;
1394
1395	GIANT_REQUIRED;
1396	if ((m->flags & (PG_BUSY|PG_UNMANAGED)) || m->busy || m->wire_count) {
1397		printf("vm_page_cache: attempting to cache busy page\n");
1398		return;
1399	}
1400	if ((m->queue - m->pc) == PQ_CACHE)
1401		return;
1402
1403	/*
1404	 * Remove all pmaps and indicate that the page is not
1405	 * writeable or mapped.
1406	 */
1407	vm_page_protect(m, VM_PROT_NONE);
1408	if (m->dirty != 0) {
1409		panic("vm_page_cache: caching a dirty page, pindex: %ld",
1410			(long)m->pindex);
1411	}
1412	s = splvm();
1413	vm_pageq_remove_nowakeup(m);
1414	vm_pageq_enqueue(PQ_CACHE + m->pc, m);
1415	vm_page_free_wakeup();
1416	splx(s);
1417}
1418
1419/*
1420 * vm_page_dontneed
1421 *
1422 *	Cache, deactivate, or do nothing as appropriate.  This routine
1423 *	is typically used by madvise() MADV_DONTNEED.
1424 *
1425 *	Generally speaking we want to move the page into the cache so
1426 *	it gets reused quickly.  However, this can result in a silly syndrome
1427 *	due to the page recycling too quickly.  Small objects will not be
1428 *	fully cached.  On the other hand, if we move the page to the inactive
1429 *	queue we wind up with a problem whereby very large objects
1430 *	unnecessarily blow away our inactive and cache queues.
1431 *
1432 *	The solution is to move the pages based on a fixed weighting.  We
1433 *	either leave them alone, deactivate them, or move them to the cache,
1434 *	where moving them to the cache has the highest weighting.
1435 *	By forcing some pages into other queues we eventually force the
1436 *	system to balance the queues, potentially recovering other unrelated
1437 *	space from active.  The idea is to not force this to happen too
1438 *	often.
1439 */
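
/*
 * Concretely (derived from the mask tests below): (dnw & 0x01F0) == 0
 * holds for 1 counter value in 32, leaving the page alone; of the
 * rest, clean pages see (dnw & 0x0070) == 0 another 3 times in 32 and
 * are deactivated, and the remaining 28 in 32 are cached.  Dirty pages
 * are always deactivated instead of cached.
 */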
1440void
1441vm_page_dontneed(vm_page_t m)
1442{
1443	static int dnweight;
1444	int dnw;
1445	int head;
1446
1447	GIANT_REQUIRED;
1448	dnw = ++dnweight;
1449
1450	/*
1451	 * Occasionally leave the page alone.
1452	 */
1453	if ((dnw & 0x01F0) == 0 ||
1454	    m->queue == PQ_INACTIVE ||
1455	    m->queue - m->pc == PQ_CACHE
1456	) {
1457		if (m->act_count >= ACT_INIT)
1458			--m->act_count;
1459		return;
1460	}
1461
1462	if (m->dirty == 0)
1463		vm_page_test_dirty(m);
1464
1465	if (m->dirty || (dnw & 0x0070) == 0) {
1466		/*
1467		 * Deactivate the page 3 times out of 32.
1468		 */
1469		head = 0;
1470	} else {
1471		/*
1472		 * Cache the page 28 times out of every 32.  Note that
1473		 * the page is deactivated instead of cached, but placed
1474		 * at the head of the queue instead of the tail.
1475		 */
1476		head = 1;
1477	}
1478	_vm_page_deactivate(m, head);
1479}
1480
1481/*
1482 * Grab a page, waiting until we are woken up due to the page
1483 * changing state.  We keep waiting as long as the page remains
1484 * in the object.  If the page doesn't exist, allocate it.
1485 *
1486 * This routine may block.
1487 */
1488vm_page_t
1489vm_page_grab(vm_object_t object, vm_pindex_t pindex, int allocflags)
1490{
1491	vm_page_t m;
1492	int s, generation;
1493
1494	GIANT_REQUIRED;
1495retrylookup:
1496	if ((m = vm_page_lookup(object, pindex)) != NULL) {
1497		if (m->busy || (m->flags & PG_BUSY)) {
1498			generation = object->generation;
1499
1500			s = splvm();
1501			while ((object->generation == generation) &&
1502					(m->busy || (m->flags & PG_BUSY))) {
1503				vm_page_flag_set(m, PG_WANTED | PG_REFERENCED);
1504				tsleep(m, PVM, "pgrbwt", 0);
1505				if ((allocflags & VM_ALLOC_RETRY) == 0) {
1506					splx(s);
1507					return NULL;
1508				}
1509			}
1510			splx(s);
1511			goto retrylookup;
1512		} else {
1513			vm_page_busy(m);
1514			return m;
1515		}
1516	}
1517
1518	m = vm_page_alloc(object, pindex, allocflags & ~VM_ALLOC_RETRY);
1519	if (m == NULL) {
1520		VM_WAIT;
1521		if ((allocflags & VM_ALLOC_RETRY) == 0)
1522			return NULL;
1523		goto retrylookup;
1524	}
1525
1526	return m;
1527}
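
/*
 * A minimal grab sketch (illustrative only; `m', `object' and `pindex'
 * are assumed): with VM_ALLOC_RETRY the call cannot return NULL and
 * the page comes back busied, so the caller is responsible for
 * vm_page_wakeup() when done.
 */
#if 0
	m = vm_page_grab(object, pindex, VM_ALLOC_NORMAL | VM_ALLOC_RETRY);
	/* ... m is non-NULL here and PG_BUSY is set ... */
	vm_page_wakeup(m);
#endif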
1528
1529/*
1530 * Mapping function for valid bits or for dirty bits in
1531 * a page.  May not block.
1532 *
1533 * Inputs are required to range within a page.
1534 */
1535__inline int
1536vm_page_bits(int base, int size)
1537{
1538	int first_bit;
1539	int last_bit;
1540
1541	KASSERT(
1542	    base + size <= PAGE_SIZE,
1543	    ("vm_page_bits: illegal base/size %d/%d", base, size)
1544	);
1545
1546	if (size == 0)		/* handle degenerate case */
1547		return (0);
1548
1549	first_bit = base >> DEV_BSHIFT;
1550	last_bit = (base + size - 1) >> DEV_BSHIFT;
1551
1552	return ((2 << last_bit) - (1 << first_bit));
1553}
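
/*
 * Worked example, assuming DEV_BSIZE = 512 (DEV_BSHIFT = 9):
 * vm_page_bits(512, 1024) yields first_bit = 1 and last_bit =
 * 1535 >> 9 = 2, so the result is (2 << 2) - (1 << 1) = 6, i.e.
 * the bits for DEV_BSIZE chunks 1 and 2.
 */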
1554
1555/*
1556 *	vm_page_set_validclean:
1557 *
1558 *	Sets portions of a page valid and clean.  The arguments are expected
1559 *	to be DEV_BSIZE aligned but if they aren't the bitmap is inclusive
1560 *	of any partial chunks touched by the range.  The invalid portion of
1561 *	such chunks will be zero'd.
1562 *
1563 *	This routine may not block.
1564 *
1565 *	(base + size) must be less than or equal to PAGE_SIZE.
1566 */
1567void
1568vm_page_set_validclean(vm_page_t m, int base, int size)
1569{
1570	int pagebits;
1571	int frag;
1572	int endoff;
1573
1574	GIANT_REQUIRED;
1575	if (size == 0)	/* handle degenerate case */
1576		return;
1577
1578	/*
1579	 * If the base is not DEV_BSIZE aligned and the valid
1580	 * bit is clear, we have to zero out a portion of the
1581	 * first block.
1582	 */
1583	if ((frag = base & ~(DEV_BSIZE - 1)) != base &&
1584	    (m->valid & (1 << (base >> DEV_BSHIFT))) == 0
1585	) {
1586		pmap_zero_page_area(
1587		    VM_PAGE_TO_PHYS(m),
1588		    frag,
1589		    base - frag
1590		);
1591	}
1592
1593	/*
1594	 * If the ending offset is not DEV_BSIZE aligned and the
1595	 * valid bit is clear, we have to zero out a portion of
1596	 * the last block.
1597	 */
1598	endoff = base + size;
1599	if ((frag = endoff & ~(DEV_BSIZE - 1)) != endoff &&
1600	    (m->valid & (1 << (endoff >> DEV_BSHIFT))) == 0
1601	) {
1602		pmap_zero_page_area(
1603		    VM_PAGE_TO_PHYS(m),
1604		    endoff,
1605		    DEV_BSIZE - (endoff & (DEV_BSIZE - 1))
1606		);
1607	}
1608
1609	/*
1610	 * Set valid, clear dirty bits.  If validating the entire
1611	 * page we can safely clear the pmap modify bit.  We also
1612	 * use this opportunity to clear the PG_NOSYNC flag.  If a process
1613	 * takes a write fault on a MAP_NOSYNC memory area the flag will
1614	 * be set again.
1615	 *
1616	 * We set valid bits inclusive of any overlap, but we can only
1617	 * clear dirty bits for DEV_BSIZE chunks that are fully within
1618	 * the range.
1619	 */
1620	pagebits = vm_page_bits(base, size);
1621	m->valid |= pagebits;
1622#if 0	/* NOT YET */
1623	if ((frag = base & (DEV_BSIZE - 1)) != 0) {
1624		frag = DEV_BSIZE - frag;
1625		base += frag;
1626		size -= frag;
1627		if (size < 0)
1628			size = 0;
1629	}
1630	pagebits = vm_page_bits(base, size & (DEV_BSIZE - 1));
1631#endif
1632	m->dirty &= ~pagebits;
1633	if (base == 0 && size == PAGE_SIZE) {
1634		pmap_clear_modify(m);
1635		vm_page_flag_clear(m, PG_NOSYNC);
1636	}
1637}
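
/*
 * Worked example, assuming DEV_BSIZE = 512: vm_page_set_validclean(m,
 * 100, 200) first zeroes bytes 0-99 and 300-511 (when block 0 is not
 * yet valid), then sets the valid bit and clears the dirty bit for
 * block 0, so the entire first DEV_BSIZE chunk ends up valid and clean.
 */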
1638
1639#if 0
1640
1641void
1642vm_page_set_dirty(vm_page_t m, int base, int size)
1643{
1644	m->dirty |= vm_page_bits(base, size);
1645}
1646
1647#endif
1648
1649void
1650vm_page_clear_dirty(vm_page_t m, int base, int size)
1651{
1652	GIANT_REQUIRED;
1653	m->dirty &= ~vm_page_bits(base, size);
1654}
1655
1656/*
1657 *	vm_page_set_invalid:
1658 *
1659 *	Invalidates DEV_BSIZE'd chunks within a page.  Both the
1660 *	valid and dirty bits for the affected areas are cleared.
1661 *
1662 *	May not block.
1663 */
1664void
1665vm_page_set_invalid(vm_page_t m, int base, int size)
1666{
1667	int bits;
1668
1669	GIANT_REQUIRED;
1670	bits = vm_page_bits(base, size);
1671	m->valid &= ~bits;
1672	m->dirty &= ~bits;
1673	m->object->generation++;
1674}
1675
1676/*
1677 * vm_page_zero_invalid()
1678 *
1679 *	The kernel assumes that the invalid portions of a page contain
1680 *	garbage, but such pages can be mapped into memory by user code.
1681 *	When this occurs, we must zero out the non-valid portions of the
1682 *	page so user code sees what it expects.
1683 *
1684 *	Pages are most often semi-valid when the end of a file is mapped
1685 *	into memory and the file's size is not page aligned.
1686 */
1687void
1688vm_page_zero_invalid(vm_page_t m, boolean_t setvalid)
1689{
1690	int b;
1691	int i;
1692
1693	/*
1694	 * Scan the valid bits looking for invalid sections that
1695	 * must be zeroed.  Invalid sub-DEV_BSIZE'd areas ( where the
1696	 * valid bit may be set ) have already been zeroed by
1697	 * vm_page_set_validclean().
1698	 */
1699	for (b = i = 0; i <= PAGE_SIZE / DEV_BSIZE; ++i) {
1700		if (i == (PAGE_SIZE / DEV_BSIZE) ||
1701		    (m->valid & (1 << i))
1702		) {
1703			if (i > b) {
1704				pmap_zero_page_area(
1705				    VM_PAGE_TO_PHYS(m),
1706				    b << DEV_BSHIFT,
1707				    (i - b) << DEV_BSHIFT
1708				);
1709			}
1710			b = i + 1;
1711		}
1712	}
1713
1714	/*
1715	 * setvalid is TRUE when we can safely set the zero'd areas
1716	 * as being valid.  We can do this if there are no cache consistency
1717	 * issues.  E.g., it is ok to do with UFS, but not ok to do with NFS.
1718	 */
1719	if (setvalid)
1720		m->valid = VM_PAGE_BITS_ALL;
1721}
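
/*
 * For example (assuming 4K pages and DEV_BSIZE = 512), the last page
 * of a 10000 byte file has m->valid = 0x000f: blocks 0-3 cover the
 * 1808 resident bytes, and the scan above zeroes blocks 4-15 so a
 * user mapping reads zeroes past end-of-file.
 */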
1722
1723/*
1724 *	vm_page_is_valid:
1725 *
1726 *	Is the (partial) page valid?  Note the degenerate case where
1727 *	size == 0: the routine returns FALSE if the page is entirely
1728 *	invalid, and TRUE otherwise.
1729 *
1730 *	May not block.
1731 */
1732int
1733vm_page_is_valid(vm_page_t m, int base, int size)
1734{
1735	int bits = vm_page_bits(base, size);
1736
1737	if (m->valid && ((m->valid & bits) == bits))
1738		return 1;
1739	else
1740		return 0;
1741}
1742
1743/*
1744 * update dirty bits from pmap/mmu.  May not block.
1745 */
1746void
1747vm_page_test_dirty(vm_page_t m)
1748{
1749	if ((m->dirty != VM_PAGE_BITS_ALL) && pmap_is_modified(m)) {
1750		vm_page_dirty(m);
1751	}
1752}
1753
1754#include "opt_ddb.h"
1755#ifdef DDB
1756#include <sys/kernel.h>
1757
1758#include <ddb/ddb.h>
1759
1760DB_SHOW_COMMAND(page, vm_page_print_page_info)
1761{
1762	db_printf("cnt.v_free_count: %d\n", cnt.v_free_count);
1763	db_printf("cnt.v_cache_count: %d\n", cnt.v_cache_count);
1764	db_printf("cnt.v_inactive_count: %d\n", cnt.v_inactive_count);
1765	db_printf("cnt.v_active_count: %d\n", cnt.v_active_count);
1766	db_printf("cnt.v_wire_count: %d\n", cnt.v_wire_count);
1767	db_printf("cnt.v_free_reserved: %d\n", cnt.v_free_reserved);
1768	db_printf("cnt.v_free_min: %d\n", cnt.v_free_min);
1769	db_printf("cnt.v_free_target: %d\n", cnt.v_free_target);
1770	db_printf("cnt.v_cache_min: %d\n", cnt.v_cache_min);
1771	db_printf("cnt.v_inactive_target: %d\n", cnt.v_inactive_target);
1772}
1773
1774DB_SHOW_COMMAND(pageq, vm_page_print_pageq_info)
1775{
1776	int i;
1777	db_printf("PQ_FREE:");
1778	for (i = 0; i < PQ_L2_SIZE; i++) {
1779		db_printf(" %d", vm_page_queues[PQ_FREE + i].lcnt);
1780	}
1781	db_printf("\n");
1782
1783	db_printf("PQ_CACHE:");
1784	for (i = 0; i < PQ_L2_SIZE; i++) {
1785		db_printf(" %d", vm_page_queues[PQ_CACHE + i].lcnt);
1786	}
1787	db_printf("\n");
1788
1789	db_printf("PQ_ACTIVE: %d, PQ_INACTIVE: %d\n",
1790		vm_page_queues[PQ_ACTIVE].lcnt,
1791		vm_page_queues[PQ_INACTIVE].lcnt);
1792}
1793#endif /* DDB */
1794