vm_page.c revision 12904
/*
 * Copyright (c) 1991 Regents of the University of California.
 * All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * The Mach Operating System project at Carnegie-Mellon University.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	from: @(#)vm_page.c	7.4 (Berkeley) 5/7/91
 *	$Id: vm_page.c,v 1.43 1995/12/14 09:55:07 phk Exp $
 */

/*
 * Copyright (c) 1987, 1990 Carnegie-Mellon University.
 * All rights reserved.
 *
 * Authors: Avadis Tevanian, Jr., Michael Wayne Young
 *
 * Permission to use, copy, modify and distribute this software and
 * its documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND
 * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie the
 * rights to redistribute these changes.
 */

/*
 *	Resident memory management module.
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/proc.h>
#include <sys/queue.h>
#include <sys/vmmeter.h>

#include <vm/vm.h>
#include <vm/vm_param.h>
#include <vm/vm_prot.h>
#include <vm/lock.h>
#include <vm/vm_kern.h>
#include <vm/vm_object.h>
#include <vm/vm_page.h>
#include <vm/vm_map.h>
#include <vm/vm_pageout.h>
#include <vm/vm_extern.h>

#ifdef DDB
extern void	DDB_print_page_info __P((void));
#endif

/*
 *	Associated with each page of user-allocatable memory is a
 *	page structure.
 */

static struct pglist *vm_page_buckets;	/* Array of buckets */
static int vm_page_bucket_count;	/* How big is array? */
static int vm_page_hash_mask;		/* Mask for hash function */

struct pglist vm_page_queue_free;
struct pglist vm_page_queue_zero;
struct pglist vm_page_queue_active;
struct pglist vm_page_queue_inactive;
struct pglist vm_page_queue_cache;

vm_page_t vm_page_array;
static int vm_page_array_size;
long first_page;
static long last_page;
static vm_size_t page_mask;
static int page_shift;
int vm_page_zero_count;

/*
 * map of contiguous valid DEV_BSIZE chunks in a page
 * (this list is valid for page sizes up to 16*DEV_BSIZE)
 */
static u_short vm_page_dev_bsize_chunks[] = {
	0x0, 0x1, 0x3, 0x7, 0xf, 0x1f, 0x3f, 0x7f, 0xff,
	0x1ff, 0x3ff, 0x7ff, 0xfff, 0x1fff, 0x3fff, 0x7fff, 0xffff
};
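
/*
 * Entry N of this table has its low N bits set, so vm_page_bits() (below)
 * can turn "N contiguous DEV_BSIZE chunks starting at chunk B" into a bit
 * mask simply by indexing with N and shifting left by B.
 */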

static inline __pure int
		vm_page_hash __P((vm_object_t object, vm_pindex_t pindex))
		__pure2;
static void	vm_page_unqueue __P((vm_page_t ));

/*
 *	vm_set_page_size:
 *
 *	Sets the page size, perhaps based upon the memory
 *	size.  Must be called before any use of page-size
 *	dependent functions.
 *
 *	Sets page_shift and page_mask from cnt.v_page_size.
 */
void
vm_set_page_size()
{

	if (cnt.v_page_size == 0)
		cnt.v_page_size = DEFAULT_PAGE_SIZE;
	page_mask = cnt.v_page_size - 1;
	if ((page_mask & cnt.v_page_size) != 0)
		panic("vm_set_page_size: page size not a power of two");
	for (page_shift = 0;; page_shift++)
		if ((1 << page_shift) == cnt.v_page_size)
			break;
}

/*
 *	vm_page_startup:
 *
 *	Initializes the resident memory module.
 *
 *	Allocates memory for the page cells, and
 *	for the object/offset-to-page hash table headers.
 *	Each page cell is initialized and placed on the free list.
 */
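
/*
 * Overview of the allocations below: the largest chunk of physical memory
 * in phys_avail[] is used to back, in order, the hash-table buckets, the
 * statically allocated map/map-entry area (kentry_data), and the
 * vm_page_array itself.  Each region is mapped at the supplied virtual
 * address and zeroed; every remaining managed page is then placed on the
 * free queue, and the first virtual address past these structures is
 * returned to the caller.
 */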

vm_offset_t
vm_page_startup(starta, enda, vaddr)
	register vm_offset_t starta;
	vm_offset_t enda;
	register vm_offset_t vaddr;
{
	register vm_offset_t mapped;
	register vm_page_t m;
	register struct pglist *bucket;
	vm_size_t npages, page_range;
	register vm_offset_t new_start;
	int i;
	vm_offset_t pa;
	int nblocks;
	vm_offset_t first_managed_page;

	/* the biggest memory array is the second group of pages */
	vm_offset_t start;
	vm_offset_t biggestone, biggestsize;

	vm_offset_t total;

	total = 0;
	biggestsize = 0;
	biggestone = 0;
	nblocks = 0;
	vaddr = round_page(vaddr);

	for (i = 0; phys_avail[i + 1]; i += 2) {
		phys_avail[i] = round_page(phys_avail[i]);
		phys_avail[i + 1] = trunc_page(phys_avail[i + 1]);
	}

	for (i = 0; phys_avail[i + 1]; i += 2) {
		int size = phys_avail[i + 1] - phys_avail[i];

		if (size > biggestsize) {
			biggestone = i;
			biggestsize = size;
		}
		++nblocks;
		total += size;
	}

	start = phys_avail[biggestone];

	/*
	 * Initialize the queue headers for the free queue, the active queue
	 * and the inactive queue.
	 */

	TAILQ_INIT(&vm_page_queue_free);
	TAILQ_INIT(&vm_page_queue_zero);
	TAILQ_INIT(&vm_page_queue_active);
	TAILQ_INIT(&vm_page_queue_inactive);
	TAILQ_INIT(&vm_page_queue_cache);

	/*
	 * Allocate (and initialize) the hash table buckets.
	 *
	 * The number of buckets MUST BE a power of 2, and the actual value is
	 * the next power of 2 greater than the number of physical pages in
	 * the system.
	 *
	 * Note: This computation can be tweaked if desired.
	 */
	vm_page_buckets = (struct pglist *) vaddr;
	bucket = vm_page_buckets;
	if (vm_page_bucket_count == 0) {
		vm_page_bucket_count = 1;
		while (vm_page_bucket_count < atop(total))
			vm_page_bucket_count <<= 1;
	}
	vm_page_hash_mask = vm_page_bucket_count - 1;

	/*
	 * Validate these addresses.
	 */

	new_start = start + vm_page_bucket_count * sizeof(struct pglist);
	new_start = round_page(new_start);
	mapped = vaddr;
	vaddr = pmap_map(mapped, start, new_start,
	    VM_PROT_READ | VM_PROT_WRITE);
	start = new_start;
	bzero((caddr_t) mapped, vaddr - mapped);
	mapped = vaddr;

	for (i = 0; i < vm_page_bucket_count; i++) {
		TAILQ_INIT(bucket);
		bucket++;
	}

	/*
	 * round (or truncate) the addresses to our page size.
	 */

	/*
	 * Pre-allocate maps and map entries that cannot be dynamically
	 * allocated via malloc().  The maps include the kernel_map and
	 * kmem_map which must be initialized before malloc() will work
	 * (obviously).  Also could include pager maps which would be
	 * allocated before kmeminit.
	 *
	 * Allow some kernel map entries... this should be plenty since people
	 * shouldn't be cluttering up the kernel map (they should use their
	 * own maps).
	 */

	kentry_data_size = MAX_KMAP * sizeof(struct vm_map) +
	    MAX_KMAPENT * sizeof(struct vm_map_entry);
	kentry_data_size = round_page(kentry_data_size);
	kentry_data = (vm_offset_t) vaddr;
	vaddr += kentry_data_size;

	/*
	 * Validate these zone addresses.
	 */

	new_start = start + (vaddr - mapped);
	pmap_map(mapped, start, new_start, VM_PROT_READ | VM_PROT_WRITE);
	bzero((caddr_t) mapped, (vaddr - mapped));
	start = round_page(new_start);

	/*
	 * Compute the number of pages of memory that will be available for
	 * use (taking into account the overhead of a page structure per
	 * page).
	 */

	first_page = phys_avail[0] / PAGE_SIZE;
	last_page = phys_avail[(nblocks - 1) * 2 + 1] / PAGE_SIZE;

	page_range = last_page - (phys_avail[0] / PAGE_SIZE);
	npages = (total - (page_range * sizeof(struct vm_page)) -
	    (start - phys_avail[biggestone])) / PAGE_SIZE;

	/*
	 * Initialize the mem entry structures now, and put them in the free
	 * queue.
	 */

	vm_page_array = (vm_page_t) vaddr;
	mapped = vaddr;

	/*
	 * Validate these addresses.
	 */

	new_start = round_page(start + page_range * sizeof(struct vm_page));
	mapped = pmap_map(mapped, start, new_start,
	    VM_PROT_READ | VM_PROT_WRITE);
	start = new_start;

	first_managed_page = start / PAGE_SIZE;

	/*
	 * Clear all of the page structures
	 */
	bzero((caddr_t) vm_page_array, page_range * sizeof(struct vm_page));
	vm_page_array_size = page_range;

	cnt.v_page_count = 0;
	cnt.v_free_count = 0;
	for (i = 0; phys_avail[i + 1] && npages > 0; i += 2) {
		if (i == biggestone)
			pa = ptoa(first_managed_page);
		else
			pa = phys_avail[i];
		while (pa < phys_avail[i + 1] && npages-- > 0) {
			++cnt.v_page_count;
			++cnt.v_free_count;
			m = PHYS_TO_VM_PAGE(pa);
			m->flags = PG_FREE;
			m->phys_addr = pa;
			TAILQ_INSERT_TAIL(&vm_page_queue_free, m, pageq);
			pa += PAGE_SIZE;
		}
	}

	return (mapped);
}

/*
 *	vm_page_hash:
 *
 *	Distributes the object/offset key pair among hash buckets.
 *
 *	NOTE:  This function depends on vm_page_bucket_count being a power of 2.
 */
static inline __pure int
vm_page_hash(object, pindex)
	vm_object_t object;
	vm_pindex_t pindex;
{
	return ((unsigned) object + pindex) & vm_page_hash_mask;
}

/*
 *	vm_page_insert:		[ internal use only ]
 *
 *	Inserts the given mem entry into the object/offset-page
 *	table and object list.
 *
 *	The object and page must be locked, and the caller must be at splhigh.
 */

inline void
vm_page_insert(mem, object, pindex)
	register vm_page_t mem;
	register vm_object_t object;
	register vm_pindex_t pindex;
{
	register struct pglist *bucket;

	if (mem->flags & PG_TABLED)
		panic("vm_page_insert: already inserted");

	/*
	 * Record the object/offset pair in this page
	 */

	mem->object = object;
	mem->pindex = pindex;

	/*
	 * Insert it into the object/offset hash table
	 */

	bucket = &vm_page_buckets[vm_page_hash(object, pindex)];
	TAILQ_INSERT_TAIL(bucket, mem, hashq);

	/*
	 * Now link into the object's list of backed pages.
	 */

	TAILQ_INSERT_TAIL(&object->memq, mem, listq);
	mem->flags |= PG_TABLED;

	/*
	 * And show that the object has one more resident page.
	 */

	object->resident_page_count++;
}

/*
 *	vm_page_remove:		[ internal use only ]
 *				NOTE: used by device pager as well -wfj
 *
 *	Removes the given mem entry from the object/offset-page
 *	table and the object page list.
 *
 *	The object and page must be locked, and at splhigh.
 */

inline void
vm_page_remove(mem)
	register vm_page_t mem;
{
	register struct pglist *bucket;

	if (!(mem->flags & PG_TABLED))
		return;

	/*
	 * Remove from the object/offset hash table
	 */

	bucket = &vm_page_buckets[vm_page_hash(mem->object, mem->pindex)];
	TAILQ_REMOVE(bucket, mem, hashq);

	/*
	 * Now remove from the object's list of backed pages.
	 */

	TAILQ_REMOVE(&mem->object->memq, mem, listq);

	/*
	 * And show that the object has one fewer resident page.
	 */

	mem->object->resident_page_count--;

	mem->flags &= ~PG_TABLED;
}

/*
 *	vm_page_lookup:
 *
 *	Returns the page associated with the object/offset
 *	pair specified; if none is found, NULL is returned.
 *
 *	The object must be locked.  No side effects.
 */

vm_page_t
vm_page_lookup(object, pindex)
	register vm_object_t object;
	register vm_pindex_t pindex;
{
	register vm_page_t mem;
	register struct pglist *bucket;
	int s;

	/*
	 * Search the hash table for this object/offset pair
	 */

	bucket = &vm_page_buckets[vm_page_hash(object, pindex)];

	s = splhigh();
	for (mem = bucket->tqh_first; mem != NULL; mem = mem->hashq.tqe_next) {
		if ((mem->object == object) && (mem->pindex == pindex)) {
			splx(s);
			return (mem);
		}
	}

	splx(s);
	return (NULL);
}

/*
 *	vm_page_rename:
 *
 *	Move the given memory entry from its
 *	current object to the specified target object/offset.
 *
 *	The object must be locked.
 */
void
vm_page_rename(mem, new_object, new_pindex)
	register vm_page_t mem;
	register vm_object_t new_object;
	vm_pindex_t new_pindex;
{
	int s;

	s = splhigh();
	vm_page_remove(mem);
	vm_page_insert(mem, new_object, new_pindex);
	splx(s);
}

/*
 * vm_page_unqueue must be called at splhigh().
 */
static inline void
vm_page_unqueue(vm_page_t mem)
{
	int origflags;

	origflags = mem->flags;

	if ((origflags & (PG_ACTIVE|PG_INACTIVE|PG_CACHE)) == 0)
		return;

	if (origflags & PG_ACTIVE) {
		TAILQ_REMOVE(&vm_page_queue_active, mem, pageq);
		cnt.v_active_count--;
		mem->flags &= ~PG_ACTIVE;
	} else if (origflags & PG_INACTIVE) {
		TAILQ_REMOVE(&vm_page_queue_inactive, mem, pageq);
		cnt.v_inactive_count--;
		mem->flags &= ~PG_INACTIVE;
	} else if (origflags & PG_CACHE) {
		TAILQ_REMOVE(&vm_page_queue_cache, mem, pageq);
		cnt.v_cache_count--;
		mem->flags &= ~PG_CACHE;
		if (cnt.v_cache_count + cnt.v_free_count < cnt.v_free_reserved)
			pagedaemon_wakeup();
	}
	return;
}

/*
 *	vm_page_alloc:
 *
 *	Allocate and return a memory cell associated
 *	with this VM object/offset pair.
 *
 *	page_req classes:
 *	VM_ALLOC_NORMAL		normal process request
 *	VM_ALLOC_SYSTEM		system *really* needs a page
 *	VM_ALLOC_INTERRUPT	interrupt time request
 *	any of which may be or'ed with:
 *	VM_ALLOC_ZERO		zero page
 *
 *	Object must be locked.
 */
vm_page_t
vm_page_alloc(object, pindex, page_req)
	vm_object_t object;
	vm_pindex_t pindex;
	int page_req;
{
	register vm_page_t mem;
	int s;

#ifdef DIAGNOSTIC
	mem = vm_page_lookup(object, pindex);
	if (mem)
		panic("vm_page_alloc: page already allocated");
#endif

	if ((curproc == pageproc) && (page_req != VM_ALLOC_INTERRUPT)) {
		page_req = VM_ALLOC_SYSTEM;
	}

	s = splhigh();

	switch ((page_req & ~(VM_ALLOC_ZERO))) {
	case VM_ALLOC_NORMAL:
		if (cnt.v_free_count >= cnt.v_free_reserved) {
			if (page_req & VM_ALLOC_ZERO) {
				mem = vm_page_queue_zero.tqh_first;
				if (mem) {
					--vm_page_zero_count;
					TAILQ_REMOVE(&vm_page_queue_zero, mem, pageq);
					mem->flags = PG_BUSY|PG_ZERO;
				} else {
					mem = vm_page_queue_free.tqh_first;
					TAILQ_REMOVE(&vm_page_queue_free, mem, pageq);
					mem->flags = PG_BUSY;
				}
			} else {
				mem = vm_page_queue_free.tqh_first;
				if (mem) {
					TAILQ_REMOVE(&vm_page_queue_free, mem, pageq);
					mem->flags = PG_BUSY;
				} else {
					--vm_page_zero_count;
					mem = vm_page_queue_zero.tqh_first;
					TAILQ_REMOVE(&vm_page_queue_zero, mem, pageq);
					mem->flags = PG_BUSY|PG_ZERO;
				}
			}
			cnt.v_free_count--;
		} else {
			mem = vm_page_queue_cache.tqh_first;
			if (mem != NULL) {
				TAILQ_REMOVE(&vm_page_queue_cache, mem, pageq);
				vm_page_remove(mem);
				mem->flags = PG_BUSY;
				cnt.v_cache_count--;
			} else {
				splx(s);
				pagedaemon_wakeup();
				return (NULL);
			}
		}
		break;

	case VM_ALLOC_SYSTEM:
		if ((cnt.v_free_count >= cnt.v_free_reserved) ||
		    ((cnt.v_cache_count == 0) &&
		    (cnt.v_free_count >= cnt.v_interrupt_free_min))) {
			if (page_req & VM_ALLOC_ZERO) {
				mem = vm_page_queue_zero.tqh_first;
				if (mem) {
					--vm_page_zero_count;
					TAILQ_REMOVE(&vm_page_queue_zero, mem, pageq);
					mem->flags = PG_BUSY|PG_ZERO;
				} else {
					mem = vm_page_queue_free.tqh_first;
					TAILQ_REMOVE(&vm_page_queue_free, mem, pageq);
					mem->flags = PG_BUSY;
				}
			} else {
				mem = vm_page_queue_free.tqh_first;
				if (mem) {
					TAILQ_REMOVE(&vm_page_queue_free, mem, pageq);
					mem->flags = PG_BUSY;
				} else {
					--vm_page_zero_count;
					mem = vm_page_queue_zero.tqh_first;
					TAILQ_REMOVE(&vm_page_queue_zero, mem, pageq);
					mem->flags = PG_BUSY|PG_ZERO;
				}
			}
			cnt.v_free_count--;
		} else {
			mem = vm_page_queue_cache.tqh_first;
			if (mem != NULL) {
				TAILQ_REMOVE(&vm_page_queue_cache, mem, pageq);
				vm_page_remove(mem);
				mem->flags = PG_BUSY;
				cnt.v_cache_count--;
			} else {
				splx(s);
				pagedaemon_wakeup();
				return (NULL);
			}
		}
		break;

	case VM_ALLOC_INTERRUPT:
		if (cnt.v_free_count > 0) {
			mem = vm_page_queue_free.tqh_first;
			if (mem) {
				TAILQ_REMOVE(&vm_page_queue_free, mem, pageq);
				mem->flags = PG_BUSY;
			} else {
				--vm_page_zero_count;
				mem = vm_page_queue_zero.tqh_first;
				TAILQ_REMOVE(&vm_page_queue_zero, mem, pageq);
				mem->flags = PG_BUSY|PG_ZERO;
			}
			cnt.v_free_count--;
		} else {
			splx(s);
			pagedaemon_wakeup();
			return NULL;
		}
		break;

	default:
		panic("vm_page_alloc: invalid allocation class");
	}

	mem->wire_count = 0;
	mem->hold_count = 0;
	mem->act_count = 0;
	mem->busy = 0;
	mem->valid = 0;
	mem->dirty = 0;
	mem->bmapped = 0;

	/* XXX before splx until vm_page_insert is safe */
	vm_page_insert(mem, object, pindex);

	splx(s);

	/*
	 * Don't wakeup too often - wakeup the pageout daemon when
	 * we would be nearly out of memory.
	 */
	if (((cnt.v_free_count + cnt.v_cache_count) < cnt.v_free_min) ||
	    (cnt.v_free_count < cnt.v_pageout_free_min))
		pagedaemon_wakeup();

	return (mem);
}
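
/*
 * Illustrative caller pattern (hypothetical, not taken from this file):
 * ordinary requests use VM_ALLOC_NORMAL and must tolerate a NULL return
 * when memory is tight, e.g.
 *
 *	m = vm_page_alloc(object, pindex, VM_ALLOC_NORMAL | VM_ALLOC_ZERO);
 *	if (m == NULL) {
 *		(block until the pagedaemon has freed memory, then retry)
 *	}
 *
 * Note that VM_ALLOC_ZERO only expresses a preference for a pre-zeroed
 * page; the caller must still check PG_ZERO in m->flags before assuming
 * the page contains zeroes.
 */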

vm_offset_t
vm_page_alloc_contig(size, low, high, alignment)
	vm_offset_t size;
	vm_offset_t low;
	vm_offset_t high;
	vm_offset_t alignment;
{
	int i, s, start;
	vm_offset_t addr, phys, tmp_addr;
	vm_page_t pga = vm_page_array;

	if ((alignment & (alignment - 1)) != 0)
		panic("vm_page_alloc_contig: alignment must be a power of 2");

	start = 0;
	s = splhigh();
again:
	/*
	 * Find first page in array that is free, within range, and aligned.
	 */
	for (i = start; i < cnt.v_page_count; i++) {
		phys = VM_PAGE_TO_PHYS(&pga[i]);
		if (((pga[i].flags & PG_FREE) == PG_FREE) &&
		    (phys >= low) && (phys < high) &&
		    ((phys & (alignment - 1)) == 0))
			break;
	}

	/*
	 * If the above failed or we will exceed the upper bound, fail.
	 */
	if ((i == cnt.v_page_count) || ((VM_PAGE_TO_PHYS(&pga[i]) + size) > high)) {
		splx(s);
		return (NULL);
	}
	start = i;

	/*
	 * Check that successive pages are contiguous and free.
	 */
	for (i = start + 1; i < (start + size / PAGE_SIZE); i++) {
		if ((VM_PAGE_TO_PHYS(&pga[i]) !=
			(VM_PAGE_TO_PHYS(&pga[i - 1]) + PAGE_SIZE)) ||
		    ((pga[i].flags & PG_FREE) != PG_FREE)) {
			start++;
			goto again;
		}
	}

	/*
	 * We've found a contiguous chunk that meets our requirements.
	 * Allocate kernel VM, take the pages off the free list, map them
	 * into the allocated VM, and return the kernel VM pointer.
	 */
	tmp_addr = addr = kmem_alloc_pageable(kernel_map, size);

	for (i = start; i < (start + size / PAGE_SIZE); i++) {
		vm_page_t m = &pga[i];

		TAILQ_REMOVE(&vm_page_queue_free, m, pageq);
		cnt.v_free_count--;
		m->valid = VM_PAGE_BITS_ALL;
		m->flags = 0;
		m->dirty = 0;
		m->wire_count = 0;
		m->act_count = 0;
		m->bmapped = 0;
		m->busy = 0;
		vm_page_insert(m, kernel_object,
			OFF_TO_IDX(tmp_addr - VM_MIN_KERNEL_ADDRESS));
		vm_page_wire(m);
		pmap_kenter(tmp_addr, VM_PAGE_TO_PHYS(m));
		tmp_addr += PAGE_SIZE;
	}

	splx(s);
	return (addr);
}
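
/*
 * Illustrative use (hypothetical): a driver that needs a physically
 * contiguous DMA buffer below 16MB, aligned to 64KB, might call
 *
 *	buf = vm_page_alloc_contig(64 * 1024, 0, 16 * 1024 * 1024,
 *	    64 * 1024);
 *
 * and treat a return value of 0 as "no suitable run was found".  The
 * pages returned are wired and entered into the kernel pmap, so the
 * address can be used directly by the kernel.
 */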

/*
 *	vm_page_free:
 *
 *	Returns the given page to the free list,
 *	disassociating it from any VM object.
 *
 *	Object and page must be locked prior to entry.
 */
void
vm_page_free(mem)
	register vm_page_t mem;
{
	int s;
	int flags;

	s = splhigh();
	vm_page_remove(mem);
	vm_page_unqueue(mem);

	flags = mem->flags;
	if (mem->bmapped || mem->busy || flags & (PG_BUSY|PG_FREE)) {
		if (flags & PG_FREE)
			panic("vm_page_free: freeing free page");
		printf("vm_page_free: pindex(%ld), bmapped(%d), busy(%d), PG_BUSY(%d)\n",
		    mem->pindex, mem->bmapped, mem->busy, (flags & PG_BUSY) ? 1 : 0);
		panic("vm_page_free: freeing busy page");
	}

	if ((flags & PG_WANTED) != 0)
		wakeup(mem);
	if ((flags & PG_FICTITIOUS) == 0) {
		if (mem->wire_count) {
			if (mem->wire_count > 1) {
				printf("vm_page_free: wire count > 1 (%d)\n", mem->wire_count);
				panic("vm_page_free: invalid wire count");
			}
			cnt.v_wire_count--;
			mem->wire_count = 0;
		}
		mem->flags |= PG_FREE;
		TAILQ_INSERT_TAIL(&vm_page_queue_free, mem, pageq);
		splx(s);
		/*
		 * if pageout daemon needs pages, then tell it that there are
		 * some free.
		 */
		if (vm_pageout_pages_needed) {
			wakeup(&vm_pageout_pages_needed);
			vm_pageout_pages_needed = 0;
		}

		cnt.v_free_count++;
		/*
		 * Wake up processes that are waiting on memory if we hit a
		 * high water mark, and wake up the scheduler process if we
		 * have lots of memory; it will swap in processes.
		 */
		if ((cnt.v_free_count + cnt.v_cache_count) == cnt.v_free_min) {
			wakeup(&cnt.v_free_count);
			wakeup(&proc0);
		}
	} else {
		splx(s);
	}
	cnt.v_tfree++;
}


/*
 *	vm_page_wire:
 *
 *	Mark this page as wired down by yet
 *	another map, removing it from paging queues
 *	as necessary.
 *
 *	The page queues must be locked.
 */
void
vm_page_wire(mem)
	register vm_page_t mem;
{
	int s;

	if (mem->wire_count == 0) {
		s = splhigh();
		vm_page_unqueue(mem);
		splx(s);
		cnt.v_wire_count++;
	}
	mem->flags |= PG_WRITEABLE|PG_MAPPED;
	mem->wire_count++;
}

/*
 *	vm_page_unwire:
 *
 *	Release one wiring of this page, potentially
 *	enabling it to be paged again.
 *
 *	The page queues must be locked.
 */
void
vm_page_unwire(mem)
	register vm_page_t mem;
{
	int s;

	s = splhigh();

	if (mem->wire_count)
		mem->wire_count--;
	if (mem->wire_count == 0) {
		TAILQ_INSERT_TAIL(&vm_page_queue_active, mem, pageq);
		cnt.v_active_count++;
		mem->flags |= PG_ACTIVE;
		cnt.v_wire_count--;
	}
	splx(s);
}

/*
 *	vm_page_activate:
 *
 *	Put the specified page on the active list (if appropriate).
 *
 *	The page queues must be locked.
 */
void
vm_page_activate(m)
	register vm_page_t m;
{
	int s;

	s = splhigh();
	if (m->flags & PG_ACTIVE)
		panic("vm_page_activate: already active");

	if (m->flags & PG_CACHE)
		cnt.v_reactivated++;

	vm_page_unqueue(m);

	if (m->wire_count == 0) {
		TAILQ_INSERT_TAIL(&vm_page_queue_active, m, pageq);
		m->flags |= PG_ACTIVE;
		if (m->act_count < 5)
			m->act_count = 5;
		else if (m->act_count < ACT_MAX)
			m->act_count += 1;
		cnt.v_active_count++;
	}
	splx(s);
}

/*
 *	vm_page_deactivate:
 *
 *	Returns the given page to the inactive list,
 *	indicating that no physical maps have access
 *	to this page.  [Used by the physical mapping system.]
 *
 *	The page queues must be locked.
 */
void
vm_page_deactivate(m)
	register vm_page_t m;
{
	int spl;

	/*
	 * Only move active pages -- ignore locked or already inactive ones.
	 *
	 * XXX: sometimes we get pages which aren't wired down or on any queue -
	 * we need to put them on the inactive queue also, otherwise we lose
	 * track of them. Paul Mackerras (paulus@cs.anu.edu.au) 9-Jan-93.
	 */

	spl = splhigh();
	if (!(m->flags & PG_INACTIVE) && m->wire_count == 0 &&
	    m->hold_count == 0) {
		if (m->flags & PG_CACHE)
			cnt.v_reactivated++;
		vm_page_unqueue(m);
		TAILQ_INSERT_TAIL(&vm_page_queue_inactive, m, pageq);
		m->flags |= PG_INACTIVE;
		cnt.v_inactive_count++;
		m->act_count = 0;
	}
	splx(spl);
}

/*
 * vm_page_cache
 *
 * Put the specified page onto the page cache queue (if appropriate).
 */
void
vm_page_cache(m)
	register vm_page_t m;
{
	int s;

	if ((m->flags & (PG_CACHE | PG_BUSY)) || m->busy || m->wire_count ||
	    m->bmapped)
		return;

	s = splhigh();
	vm_page_unqueue(m);
	vm_page_protect(m, VM_PROT_NONE);

	TAILQ_INSERT_TAIL(&vm_page_queue_cache, m, pageq);
	m->flags |= PG_CACHE;
	cnt.v_cache_count++;
	if ((cnt.v_free_count + cnt.v_cache_count) == cnt.v_free_min) {
		wakeup(&cnt.v_free_count);
		wakeup(&proc0);
	}
	if (vm_pageout_pages_needed) {
		wakeup(&vm_pageout_pages_needed);
		vm_pageout_pages_needed = 0;
	}

	splx(s);
}

/*
 *	vm_page_zero_fill:
 *
 *	Zero-fill the specified page.
 *	Written as a standard pagein routine, to
 *	be used by the zero-fill object.
 */
boolean_t
vm_page_zero_fill(m)
	vm_page_t m;
{
	pmap_zero_page(VM_PAGE_TO_PHYS(m));
	return (TRUE);
}

/*
 *	vm_page_copy:
 *
 *	Copy one page to another
 */
void
vm_page_copy(src_m, dest_m)
	vm_page_t src_m;
	vm_page_t dest_m;
{
	pmap_copy_page(VM_PAGE_TO_PHYS(src_m), VM_PAGE_TO_PHYS(dest_m));
	dest_m->valid = VM_PAGE_BITS_ALL;
}


/*
 * mapping function for valid bits or for dirty bits in
 * a page
 */
inline int
vm_page_bits(int base, int size)
{
	u_short chunk;

	if ((base == 0) && (size >= PAGE_SIZE))
		return VM_PAGE_BITS_ALL;
	size = (size + DEV_BSIZE - 1) & ~(DEV_BSIZE - 1);
	base = (base % PAGE_SIZE) / DEV_BSIZE;
	chunk = vm_page_dev_bsize_chunks[size / DEV_BSIZE];
	return (chunk << base) & VM_PAGE_BITS_ALL;
}
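
/*
 * Worked example (assuming DEV_BSIZE == 512 and PAGE_SIZE == 4096):
 * vm_page_bits(512, 1024) rounds size up to 1024, which is two chunks
 * starting at chunk 1, so chunk == vm_page_dev_bsize_chunks[2] == 0x3
 * and the result is (0x3 << 1) == 0x06, i.e. bits 1 and 2 of the
 * valid/dirty maps.
 */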

/*
 * set a page valid and clean
 */
void
vm_page_set_validclean(m, base, size)
	vm_page_t m;
	int base;
	int size;
{
	int pagebits = vm_page_bits(base, size);
	m->valid |= pagebits;
	m->dirty &= ~pagebits;
	if (base == 0 && size == PAGE_SIZE)
		pmap_clear_modify(VM_PAGE_TO_PHYS(m));
}

/*
 * set a page (partially) invalid
 */
void
vm_page_set_invalid(m, base, size)
	vm_page_t m;
	int base;
	int size;
{
	int bits;

	m->valid &= ~(bits = vm_page_bits(base, size));
	if (m->valid == 0)
		m->dirty &= ~bits;
}

/*
 * is (partial) page valid?
 */
int
vm_page_is_valid(m, base, size)
	vm_page_t m;
	int base;
	int size;
{
	int bits = vm_page_bits(base, size);

	if (m->valid && ((m->valid & bits) == bits))
		return 1;
	else
		return 0;
}



void
vm_page_test_dirty(m)
	vm_page_t m;
{
	if ((m->dirty != VM_PAGE_BITS_ALL) &&
	    pmap_is_modified(VM_PAGE_TO_PHYS(m))) {
		m->dirty = VM_PAGE_BITS_ALL;
	}
}

#ifdef DDB
void
DDB_print_page_info(void)
{
	printf("cnt.v_free_count: %d\n", cnt.v_free_count);
	printf("cnt.v_cache_count: %d\n", cnt.v_cache_count);
	printf("cnt.v_inactive_count: %d\n", cnt.v_inactive_count);
	printf("cnt.v_active_count: %d\n", cnt.v_active_count);
	printf("cnt.v_wire_count: %d\n", cnt.v_wire_count);
	printf("cnt.v_free_reserved: %d\n", cnt.v_free_reserved);
	printf("cnt.v_free_min: %d\n", cnt.v_free_min);
	printf("cnt.v_free_target: %d\n", cnt.v_free_target);
	printf("cnt.v_cache_min: %d\n", cnt.v_cache_min);
	printf("cnt.v_inactive_target: %d\n", cnt.v_inactive_target);
}
#endif