vm_page.c revision 7090
/*
 * Copyright (c) 1991 Regents of the University of California.
 * All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * The Mach Operating System project at Carnegie-Mellon University.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	from: @(#)vm_page.c	7.4 (Berkeley) 5/7/91
 *	$Id: vm_page.c,v 1.23 1995/03/01 23:29:59 davidg Exp $
 */

/*
 * Copyright (c) 1987, 1990 Carnegie-Mellon University.
 * All rights reserved.
 *
 * Authors: Avadis Tevanian, Jr., Michael Wayne Young
 *
 * Permission to use, copy, modify and distribute this software and
 * its documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND
 * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie the
 * rights to redistribute these changes.
 */

/*
 *	Resident memory management module.
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/proc.h>

#include <vm/vm.h>
#include <vm/vm_kern.h>
#include <vm/vm_page.h>
#include <vm/vm_map.h>
#include <vm/vm_pageout.h>

/*
 *	Associated with each page of user-allocatable memory is a
 *	page structure.
 */

struct pglist *vm_page_buckets;	/* Array of buckets */
int vm_page_bucket_count = 0;	/* How big is array? */
int vm_page_hash_mask;		/* Mask for hash function */
simple_lock_data_t bucket_lock;	/* lock for all buckets XXX */

struct pglist vm_page_queue_free;
struct pglist vm_page_queue_active;
struct pglist vm_page_queue_inactive;
struct pglist vm_page_queue_cache;
simple_lock_data_t vm_page_queue_lock;
simple_lock_data_t vm_page_queue_free_lock;

/* has physical page allocation been initialized? */
boolean_t vm_page_startup_initialized;

vm_page_t vm_page_array;
int vm_page_array_size;
long first_page;
long last_page;
vm_offset_t first_phys_addr;
vm_offset_t last_phys_addr;
vm_size_t page_mask;
int page_shift;

/*
 * map of contiguous valid DEV_BSIZE chunks in a page
 * (this list is valid for page sizes up to 16*DEV_BSIZE)
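 *
 * Entry N is a mask with the low N bits set, one bit per DEV_BSIZE
 * chunk.  For example, with a DEV_BSIZE of 512 and a 4096-byte page,
 * a full page is eight chunks and maps to entry 8, 0xff.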
 */
static u_short vm_page_dev_bsize_chunks[] = {
	0x0, 0x1, 0x3, 0x7, 0xf, 0x1f, 0x3f, 0x7f, 0xff,
	0x1ff, 0x3ff, 0x7ff, 0xfff, 0x1fff, 0x3fff, 0x7fff, 0xffff
};


/*
 *	vm_set_page_size:
 *
 *	Sets the page size, perhaps based upon the memory
 *	size.  Must be called before any use of page-size
 *	dependent functions.
 *
 *	Sets page_shift and page_mask from cnt.v_page_size.
 */
void
vm_set_page_size()
{

	if (cnt.v_page_size == 0)
		cnt.v_page_size = DEFAULT_PAGE_SIZE;
	page_mask = cnt.v_page_size - 1;
	if ((page_mask & cnt.v_page_size) != 0)
		panic("vm_set_page_size: page size not a power of two");
	for (page_shift = 0;; page_shift++)
		if ((1 << page_shift) == cnt.v_page_size)
			break;
}

/*
 *	vm_page_startup:
 *
 *	Initializes the resident memory module.
 *
 *	Allocates memory for the page cells, and
 *	for the object/offset-to-page hash table headers.
 *	Each page cell is initialized and placed on the free list.
 */

vm_offset_t
vm_page_startup(starta, enda, vaddr)
	register vm_offset_t starta;
	vm_offset_t enda;
	register vm_offset_t vaddr;
{
	register vm_offset_t mapped;
	register vm_page_t m;
	register struct pglist *bucket;
	vm_size_t npages, page_range;
	register vm_offset_t new_start;
	int i;
	vm_offset_t pa;
	int nblocks;
	vm_offset_t first_managed_page;

	/* the biggest memory array is the second group of pages */
	vm_offset_t start;
	vm_offset_t biggestone, biggestsize;

	vm_offset_t total;

	total = 0;
	biggestsize = 0;
	biggestone = 0;
	nblocks = 0;
	vaddr = round_page(vaddr);

	for (i = 0; phys_avail[i + 1]; i += 2) {
		phys_avail[i] = round_page(phys_avail[i]);
		phys_avail[i + 1] = trunc_page(phys_avail[i + 1]);
	}

	for (i = 0; phys_avail[i + 1]; i += 2) {
		int size = phys_avail[i + 1] - phys_avail[i];

		if (size > biggestsize) {
			biggestone = i;
			biggestsize = size;
		}
		++nblocks;
		total += size;
	}

	start = phys_avail[biggestone];


	/*
	 * Initialize the locks
	 */

	simple_lock_init(&vm_page_queue_free_lock);
	simple_lock_init(&vm_page_queue_lock);

	/*
	 * Initialize the queue headers for the free, active, inactive and
	 * cache queues.
	 */

	TAILQ_INIT(&vm_page_queue_free);
	TAILQ_INIT(&vm_page_queue_active);
	TAILQ_INIT(&vm_page_queue_inactive);
	TAILQ_INIT(&vm_page_queue_cache);

	/*
	 * Allocate (and initialize) the hash table buckets.
	 *
	 * The number of buckets MUST BE a power of 2, and the actual value is
	 * the next power of 2 greater than the number of physical pages in
	 * the system.
	 *
	 * Note: This computation can be tweaked if desired.
	 */
	vm_page_buckets = (struct pglist *) vaddr;
	bucket = vm_page_buckets;
	if (vm_page_bucket_count == 0) {
		vm_page_bucket_count = 1;
		while (vm_page_bucket_count < atop(total))
			vm_page_bucket_count <<= 1;
	}
	vm_page_hash_mask = vm_page_bucket_count - 1;

	/*
	 * Validate these addresses.
	 */

	new_start = start + vm_page_bucket_count * sizeof(struct pglist);
	new_start = round_page(new_start);
	mapped = vaddr;
	vaddr = pmap_map(mapped, start, new_start,
	    VM_PROT_READ | VM_PROT_WRITE);
	start = new_start;
	bzero((caddr_t) mapped, vaddr - mapped);
	mapped = vaddr;

	for (i = 0; i < vm_page_bucket_count; i++) {
		TAILQ_INIT(bucket);
		bucket++;
	}

	simple_lock_init(&bucket_lock);

	/*
	 * round (or truncate) the addresses to our page size.
	 */

	/*
	 * Pre-allocate maps and map entries that cannot be dynamically
	 * allocated via malloc().  The maps include the kernel_map and
	 * kmem_map which must be initialized before malloc() will work
	 * (obviously).  Also could include pager maps which would be
	 * allocated before kmeminit.
	 *
	 * Allow some kernel map entries... this should be plenty since people
	 * shouldn't be cluttering up the kernel map (they should use their
	 * own maps).
	 */

	kentry_data_size = MAX_KMAP * sizeof(struct vm_map) +
	    MAX_KMAPENT * sizeof(struct vm_map_entry);
	kentry_data_size = round_page(kentry_data_size);
	kentry_data = (vm_offset_t) vaddr;
	vaddr += kentry_data_size;

	/*
	 * Validate these zone addresses.
	 */

	new_start = start + (vaddr - mapped);
	pmap_map(mapped, start, new_start, VM_PROT_READ | VM_PROT_WRITE);
	bzero((caddr_t) mapped, (vaddr - mapped));
	start = round_page(new_start);

	/*
	 * Compute the number of pages of memory that will be available for
	 * use (taking into account the overhead of a page structure per
	 * page).
	 */

	first_page = phys_avail[0] / PAGE_SIZE;

	/* for VM_PAGE_CHECK() */
	last_page = phys_avail[(nblocks - 1) * 2 + 1] / PAGE_SIZE;

	page_range = last_page - (phys_avail[0] / PAGE_SIZE);
	npages = (total - (page_range * sizeof(struct vm_page)) -
	    (start - phys_avail[biggestone])) / PAGE_SIZE;

	/*
	 * Initialize the mem entry structures now, and put them in the free
	 * queue.
	 */

	vm_page_array = (vm_page_t) vaddr;
	mapped = vaddr;


	/*
	 * Validate these addresses.
	 */

	new_start = round_page(start + page_range * sizeof(struct vm_page));
	mapped = pmap_map(mapped, start, new_start,
	    VM_PROT_READ | VM_PROT_WRITE);
	start = new_start;

	first_managed_page = start / PAGE_SIZE;

	/*
	 * Clear all of the page structures
	 */
	bzero((caddr_t) vm_page_array, page_range * sizeof(struct vm_page));
	vm_page_array_size = page_range;

	cnt.v_page_count = 0;
	cnt.v_free_count = 0;
	for (i = 0; phys_avail[i + 1] && npages > 0; i += 2) {
		if (i == biggestone)
			pa = ptoa(first_managed_page);
		else
			pa = phys_avail[i];
		while (pa < phys_avail[i + 1] && npages-- > 0) {
			++cnt.v_page_count;
			++cnt.v_free_count;
			m = PHYS_TO_VM_PAGE(pa);
			m->flags = PG_FREE;
			vm_page_set_clean(m, 0, PAGE_SIZE);
			m->object = 0;
			m->phys_addr = pa;
			m->hold_count = 0;
			TAILQ_INSERT_TAIL(&vm_page_queue_free, m, pageq);
			pa += PAGE_SIZE;
		}
	}

	/*
	 * Initialize vm_pages_needed lock here - don't wait for pageout
	 * daemon	XXX
	 */
	simple_lock_init(&vm_pages_needed_lock);

	return (mapped);
}

/*
 *	vm_page_hash:
 *
 *	Distributes the object/offset key pair among hash buckets.
 *
 *	NOTE:  This macro depends on vm_page_bucket_count being a power of 2.
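 *
 *	The object pointer and the page index within the object
 *	(offset / NBPG) are simply summed, so successive pages of an
 *	object tend to land in successive buckets.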
 */
inline const int
vm_page_hash(object, offset)
	vm_object_t object;
	vm_offset_t offset;
{
	return ((unsigned) object + offset / NBPG) & vm_page_hash_mask;
}

/*
 *	vm_page_insert:		[ internal use only ]
 *
 *	Inserts the given mem entry into the object/object-page
 *	table and object list.
 *
 *	The object and page must be locked, and must be splhigh.
 */

inline void
vm_page_insert(mem, object, offset)
	register vm_page_t mem;
	register vm_object_t object;
	register vm_offset_t offset;
{
	register struct pglist *bucket;

	VM_PAGE_CHECK(mem);

	if (mem->flags & PG_TABLED)
		panic("vm_page_insert: already inserted");

	/*
	 * Record the object/offset pair in this page
	 */

	mem->object = object;
	mem->offset = offset;

	/*
	 * Insert it into the object/offset hash table
	 */

	bucket = &vm_page_buckets[vm_page_hash(object, offset)];
	simple_lock(&bucket_lock);
	TAILQ_INSERT_TAIL(bucket, mem, hashq);
	simple_unlock(&bucket_lock);

	/*
	 * Now link into the object's list of backed pages.
	 */

	TAILQ_INSERT_TAIL(&object->memq, mem, listq);
	mem->flags |= PG_TABLED;

	/*
	 * And show that the object has one more resident page.
	 */

	object->resident_page_count++;
}

/*
 *	vm_page_remove:		[ internal use only ]
 *				NOTE: used by device pager as well -wfj
 *
 *	Removes the given mem entry from the object/offset-page
 *	table and the object page list.
 *
 *	The object and page must be locked, and at splhigh.
 */

inline void
vm_page_remove(mem)
	register vm_page_t mem;
{
	register struct pglist *bucket;

	VM_PAGE_CHECK(mem);

	if (!(mem->flags & PG_TABLED))
		return;

	/*
	 * Remove from the object/offset hash table
	 */

	bucket = &vm_page_buckets[vm_page_hash(mem->object, mem->offset)];
	simple_lock(&bucket_lock);
	TAILQ_REMOVE(bucket, mem, hashq);
	simple_unlock(&bucket_lock);

	/*
	 * Now remove from the object's list of backed pages.
	 */

	TAILQ_REMOVE(&mem->object->memq, mem, listq);

	/*
	 * And show that the object has one fewer resident page.
	 */

	mem->object->resident_page_count--;

	mem->flags &= ~PG_TABLED;
}

/*
 *	vm_page_lookup:
 *
 *	Returns the page associated with the object/offset
 *	pair specified; if none is found, NULL is returned.
 *
 *	The object must be locked.  No side effects.
 */

vm_page_t
vm_page_lookup(object, offset)
	register vm_object_t object;
	register vm_offset_t offset;
{
	register vm_page_t mem;
	register struct pglist *bucket;
	int s;

	/*
	 * Search the hash table for this object/offset pair
	 */

	bucket = &vm_page_buckets[vm_page_hash(object, offset)];

	s = splhigh();
	simple_lock(&bucket_lock);
	for (mem = bucket->tqh_first; mem != NULL; mem = mem->hashq.tqe_next) {
		VM_PAGE_CHECK(mem);
		if ((mem->object == object) && (mem->offset == offset)) {
			simple_unlock(&bucket_lock);
			splx(s);
			return (mem);
		}
	}

	simple_unlock(&bucket_lock);
	splx(s);
	return (NULL);
}

/*
 *	vm_page_rename:
 *
 *	Move the given memory entry from its
 *	current object to the specified target object/offset.
 *
 *	The object must be locked.
 */
void
vm_page_rename(mem, new_object, new_offset)
	register vm_page_t mem;
	register vm_object_t new_object;
	vm_offset_t new_offset;
{
	int s;

	if (mem->object == new_object)
		return;

	vm_page_lock_queues(); /* keep page from moving out from under pageout daemon */
	s = splhigh();
	vm_page_remove(mem);
	vm_page_insert(mem, new_object, new_offset);
	splx(s);
	vm_page_unlock_queues();
}

/*
 * vm_page_unqueue must be called at splhigh();
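 *
 * Removes the page from whichever paging queue (active, inactive or
 * cache) it is currently on, if any, and adjusts the matching count.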
 */
inline void
vm_page_unqueue(vm_page_t mem)
{
	int origflags;

	origflags = mem->flags;

	if ((origflags & (PG_ACTIVE|PG_INACTIVE|PG_CACHE)) == 0)
		return;

	if (origflags & PG_ACTIVE) {
		TAILQ_REMOVE(&vm_page_queue_active, mem, pageq);
		cnt.v_active_count--;
		mem->flags &= ~PG_ACTIVE;
	} else if (origflags & PG_INACTIVE) {
		TAILQ_REMOVE(&vm_page_queue_inactive, mem, pageq);
		cnt.v_inactive_count--;
		mem->flags &= ~PG_INACTIVE;
	} else if (origflags & PG_CACHE) {
		TAILQ_REMOVE(&vm_page_queue_cache, mem, pageq);
		cnt.v_cache_count--;
		mem->flags &= ~PG_CACHE;
		if (cnt.v_cache_count + cnt.v_free_count < cnt.v_free_reserved)
			pagedaemon_wakeup();
	}
	return;
}

/*
 *	vm_page_alloc:
 *
 *	Allocate and return a memory cell associated
 *	with this VM object/offset pair.
 *
 *	page_req classes:
 *	VM_ALLOC_NORMAL		normal process request
 *	VM_ALLOC_SYSTEM		system *really* needs a page
 *	VM_ALLOC_INTERRUPT	interrupt time request
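 *
 *	Roughly: VM_ALLOC_NORMAL takes a free page only while the free
 *	count stays at or above cnt.v_free_reserved, VM_ALLOC_SYSTEM may
 *	dip into that reserve (down to cnt.v_interrupt_free_min when the
 *	cache is empty), and VM_ALLOC_INTERRUPT may take the last free
 *	page.  NORMAL and SYSTEM fall back to reclaiming a page from the
 *	cache queue before failing.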
 *
 *	Object must be locked.
 */
vm_page_t
vm_page_alloc(object, offset, page_req)
	vm_object_t object;
	vm_offset_t offset;
	int page_req;
{
	register vm_page_t mem;
	int s;

	if ((curproc == pageproc) && (page_req != VM_ALLOC_INTERRUPT)) {
		page_req = VM_ALLOC_SYSTEM;
	}

	simple_lock(&vm_page_queue_free_lock);

	s = splhigh();

	mem = vm_page_queue_free.tqh_first;

	switch (page_req) {
	case VM_ALLOC_NORMAL:
		if (cnt.v_free_count >= cnt.v_free_reserved) {
			TAILQ_REMOVE(&vm_page_queue_free, mem, pageq);
			cnt.v_free_count--;
		} else {
			mem = vm_page_queue_cache.tqh_first;
			if (mem != NULL) {
				TAILQ_REMOVE(&vm_page_queue_cache, mem, pageq);
				vm_page_remove(mem);
				cnt.v_cache_count--;
			} else {
				simple_unlock(&vm_page_queue_free_lock);
				splx(s);
				pagedaemon_wakeup();
				return (NULL);
			}
		}
		break;

	case VM_ALLOC_SYSTEM:
		if ((cnt.v_free_count >= cnt.v_free_reserved) ||
		    ((cnt.v_cache_count == 0) &&
		    (cnt.v_free_count >= cnt.v_interrupt_free_min))) {
			TAILQ_REMOVE(&vm_page_queue_free, mem, pageq);
			cnt.v_free_count--;
		} else {
			mem = vm_page_queue_cache.tqh_first;
			if (mem != NULL) {
				TAILQ_REMOVE(&vm_page_queue_cache, mem, pageq);
				vm_page_remove(mem);
				cnt.v_cache_count--;
			} else {
				simple_unlock(&vm_page_queue_free_lock);
				splx(s);
				pagedaemon_wakeup();
				return (NULL);
			}
		}
		break;

	case VM_ALLOC_INTERRUPT:
		if (mem != NULL) {
			TAILQ_REMOVE(&vm_page_queue_free, mem, pageq);
			cnt.v_free_count--;
		} else {
			simple_unlock(&vm_page_queue_free_lock);
			splx(s);
			pagedaemon_wakeup();
			return NULL;
		}
		break;

	default:
		panic("vm_page_alloc: invalid allocation class");
	}

	simple_unlock(&vm_page_queue_free_lock);

	mem->flags = PG_BUSY;
	mem->wire_count = 0;
	mem->hold_count = 0;
	mem->act_count = 0;
	mem->busy = 0;
	mem->valid = 0;
	mem->dirty = 0;
	mem->bmapped = 0;

	/* XXX before splx until vm_page_insert is safe */
	vm_page_insert(mem, object, offset);

	splx(s);

	/*
	 * Don't wakeup too often - wakeup the pageout daemon when
	 * we would be nearly out of memory.
	 */
	if (((cnt.v_free_count + cnt.v_cache_count) < cnt.v_free_min) ||
	    (cnt.v_free_count < cnt.v_pageout_free_min))
		pagedaemon_wakeup();

	return (mem);
}

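/*
 *	vm_page_alloc_contig:
 *
 *	Allocate a physically contiguous, aligned run of free pages whose
 *	physical addresses fall in [low, high), map it into kernel VM and
 *	return the kernel virtual address (or NULL on failure).  Typically
 *	used for device buffers that must be physically contiguous, such
 *	as ISA DMA areas.
 */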
vm_offset_t
vm_page_alloc_contig(size, low, high, alignment)
	vm_offset_t size;
	vm_offset_t low;
	vm_offset_t high;
	vm_offset_t alignment;
{
	int i, s, start;
	vm_offset_t addr, phys, tmp_addr;
	vm_page_t pga = vm_page_array;

	if ((alignment & (alignment - 1)) != 0)
		panic("vm_page_alloc_contig: alignment must be a power of 2");

	start = 0;
	s = splhigh();
again:
	/*
	 * Find first page in array that is free, within range, and aligned.
	 */
	for (i = start; i < cnt.v_page_count; i++) {
		phys = VM_PAGE_TO_PHYS(&pga[i]);
		if (((pga[i].flags & PG_FREE) == PG_FREE) &&
		    (phys >= low) && (phys < high) &&
		    ((phys & (alignment - 1)) == 0))
			break;
	}

	/*
	 * If the above failed or we will exceed the upper bound, fail.
	 */
	if ((i == cnt.v_page_count) || ((VM_PAGE_TO_PHYS(&pga[i]) + size) > high)) {
		splx(s);
		return (NULL);
	}
	start = i;

	/*
	 * Check successive pages for contiguous and free.
	 */
	for (i = start + 1; i < (start + size / PAGE_SIZE); i++) {
		if ((VM_PAGE_TO_PHYS(&pga[i]) !=
			(VM_PAGE_TO_PHYS(&pga[i - 1]) + PAGE_SIZE)) ||
		    ((pga[i].flags & PG_FREE) != PG_FREE)) {
			start++;
			goto again;
		}
	}

	/*
	 * We've found a contiguous chunk that meets our requirements.
	 * Allocate kernel VM, unfree and assign the physical pages to it and
	 * return kernel VM pointer.
	 */
	tmp_addr = addr = kmem_alloc_pageable(kernel_map, size);

	for (i = start; i < (start + size / PAGE_SIZE); i++) {
		vm_page_t m = &pga[i];

		TAILQ_REMOVE(&vm_page_queue_free, m, pageq);
		cnt.v_free_count--;
		m->valid = VM_PAGE_BITS_ALL;
		m->flags = 0;
		m->dirty = 0;
		m->wire_count = 0;
		m->act_count = 0;
		m->bmapped = 0;
		m->busy = 0;
		vm_page_insert(m, kernel_object, tmp_addr - VM_MIN_KERNEL_ADDRESS);
		vm_page_wire(m);
		pmap_kenter(tmp_addr, VM_PAGE_TO_PHYS(&pga[i]));
		tmp_addr += PAGE_SIZE;
	}

	splx(s);
	return (addr);
}

/*
 *	vm_page_free:
 *
 *	Returns the given page to the free list,
 *	disassociating it from any VM object.
 *
 *	Object and page must be locked prior to entry.
 */
void
vm_page_free(mem)
	register vm_page_t mem;
{
	int s;
	int flags;

	s = splhigh();
	vm_page_remove(mem);
	vm_page_unqueue(mem);

	flags = mem->flags;
	if (mem->bmapped || mem->busy || flags & (PG_BUSY|PG_FREE)) {
		if (flags & PG_FREE)
			panic("vm_page_free: freeing free page");
		printf("vm_page_free: offset(%d), bmapped(%d), busy(%d), PG_BUSY(%d)\n",
		    mem->offset, mem->bmapped, mem->busy, (flags & PG_BUSY) ? 1 : 0);
		panic("vm_page_free: freeing busy page\n");
	}

	if ((flags & PG_WANTED) != 0)
		wakeup((caddr_t) mem);
	if ((flags & PG_FICTITIOUS) == 0) {

		simple_lock(&vm_page_queue_free_lock);
		if (mem->wire_count) {
			if (mem->wire_count > 1) {
				printf("vm_page_free: wire count > 1 (%d)", mem->wire_count);
				panic("vm_page_free: invalid wire count");
			}
			cnt.v_wire_count--;
			mem->wire_count = 0;
		}
		mem->flags |= PG_FREE;
		TAILQ_INSERT_TAIL(&vm_page_queue_free, mem, pageq);

		simple_unlock(&vm_page_queue_free_lock);
		splx(s);
		/*
		 * if pageout daemon needs pages, then tell it that there are
		 * some free.
		 */
		if (vm_pageout_pages_needed) {
			wakeup((caddr_t) &vm_pageout_pages_needed);
			vm_pageout_pages_needed = 0;
		}

		cnt.v_free_count++;
		/*
		 * Wake up processes that are waiting on memory if we hit a
		 * high water mark, and wake up the scheduler process if we
		 * have lots of memory; it will swap in processes.
		 */
		if ((cnt.v_free_count + cnt.v_cache_count) == cnt.v_free_min) {
			wakeup((caddr_t) &cnt.v_free_count);
			wakeup((caddr_t) &proc0);
		}
	} else {
		splx(s);
	}
	cnt.v_tfree++;
}


/*
 *	vm_page_wire:
 *
 *	Mark this page as wired down by yet
 *	another map, removing it from paging queues
 *	as necessary.
 *
 *	The page queues must be locked.
 */
void
vm_page_wire(mem)
	register vm_page_t mem;
{
	int s;
	VM_PAGE_CHECK(mem);

	if (mem->wire_count == 0) {
		s = splhigh();
		vm_page_unqueue(mem);
		splx(s);
		cnt.v_wire_count++;
	}
	mem->flags |= PG_WRITEABLE|PG_MAPPED;
	mem->wire_count++;
}

/*
 *	vm_page_unwire:
 *
 *	Release one wiring of this page, potentially
 *	enabling it to be paged again.
 *
 *	The page queues must be locked.
 */
void
vm_page_unwire(mem)
	register vm_page_t mem;
{
	int s;

	VM_PAGE_CHECK(mem);

	s = splhigh();

	if (mem->wire_count)
		mem->wire_count--;
	if (mem->wire_count == 0) {
		TAILQ_INSERT_TAIL(&vm_page_queue_active, mem, pageq);
		cnt.v_active_count++;
		mem->flags |= PG_ACTIVE;
		cnt.v_wire_count--;
	}
	splx(s);
}

/*
 *	vm_page_deactivate:
 *
 *	Returns the given page to the inactive list,
 *	indicating that no physical maps have access
 *	to this page.  [Used by the physical mapping system.]
 *
 *	The page queues must be locked.
 */
void
vm_page_deactivate(m)
	register vm_page_t m;
{
	int spl;

	VM_PAGE_CHECK(m);

	/*
	 * Only move active pages -- ignore locked or already inactive ones.
	 *
	 * XXX: sometimes we get pages which aren't wired down or on any queue -
	 * we need to put them on the inactive queue also, otherwise we lose
	 * track of them. Paul Mackerras (paulus@cs.anu.edu.au) 9-Jan-93.
	 */

	spl = splhigh();
	if (!(m->flags & PG_INACTIVE) && m->wire_count == 0 &&
	    m->hold_count == 0) {
		pmap_clear_reference(VM_PAGE_TO_PHYS(m));
		vm_page_unqueue(m);
		TAILQ_INSERT_TAIL(&vm_page_queue_inactive, m, pageq);
		m->flags |= PG_INACTIVE;
		cnt.v_inactive_count++;
		m->act_count = 0;
	}
	splx(spl);
}

/*
 * vm_page_cache
 *
 * Put the specified page onto the page cache queue (if appropriate).
 */

void
vm_page_cache(m)
	register vm_page_t m;
{
	int s;

	VM_PAGE_CHECK(m);
	if ((m->flags & (PG_CACHE | PG_BUSY)) || m->busy || m->wire_count ||
	    m->bmapped)
		return;

	s = splhigh();
	vm_page_unqueue(m);
	vm_page_protect(m, VM_PROT_NONE);

	TAILQ_INSERT_TAIL(&vm_page_queue_cache, m, pageq);
	m->flags |= PG_CACHE;
	cnt.v_cache_count++;
	if ((cnt.v_free_count + cnt.v_cache_count) == cnt.v_free_min) {
		wakeup((caddr_t) &cnt.v_free_count);
		wakeup((caddr_t) &proc0);
	}
	if (vm_pageout_pages_needed) {
		wakeup((caddr_t) &vm_pageout_pages_needed);
		vm_pageout_pages_needed = 0;
	}

	splx(s);
}

/*
 *	vm_page_activate:
 *
 *	Put the specified page on the active list (if appropriate).
 *
 *	The page queues must be locked.
 */

void
vm_page_activate(m)
	register vm_page_t m;
{
	int s;

	VM_PAGE_CHECK(m);

	s = splhigh();
	if (m->flags & PG_ACTIVE)
		panic("vm_page_activate: already active");

	vm_page_unqueue(m);

	if (m->wire_count == 0) {
		TAILQ_INSERT_TAIL(&vm_page_queue_active, m, pageq);
		m->flags |= PG_ACTIVE;
		TAILQ_REMOVE(&m->object->memq, m, listq);
		TAILQ_INSERT_TAIL(&m->object->memq, m, listq);
		if (m->act_count < 5)
			m->act_count = 5;
		else if (m->act_count < ACT_MAX)
			m->act_count += 1;
		cnt.v_active_count++;
	}
	splx(s);
}

/*
 *	vm_page_zero_fill:
 *
 *	Zero-fill the specified page.
 *	Written as a standard pagein routine, to
 *	be used by the zero-fill object.
 */

boolean_t
vm_page_zero_fill(m)
	vm_page_t m;
{
	VM_PAGE_CHECK(m);

	pmap_zero_page(VM_PAGE_TO_PHYS(m));
	m->valid = VM_PAGE_BITS_ALL;
	return (TRUE);
}

/*
 *	vm_page_copy:
 *
 *	Copy one page to another
 */
void
vm_page_copy(src_m, dest_m)
	vm_page_t src_m;
	vm_page_t dest_m;
{
	VM_PAGE_CHECK(src_m);
	VM_PAGE_CHECK(dest_m);

	pmap_copy_page(VM_PAGE_TO_PHYS(src_m), VM_PAGE_TO_PHYS(dest_m));
	dest_m->valid = VM_PAGE_BITS_ALL;
}


/*
 * mapping function for valid bits or for dirty bits in
 * a page
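 *
 * For example, with a DEV_BSIZE of 512, vm_page_bits(512, 1024) covers
 * the second and third 512-byte chunks of the page: the two-chunk mask
 * 0x3 shifted left by the starting chunk index 1 gives 0x6.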
 */
inline int
vm_page_bits(int base, int size)
{
	u_short chunk;

	if ((base == 0) && (size >= PAGE_SIZE))
		return VM_PAGE_BITS_ALL;
	size = (size + DEV_BSIZE - 1) & ~(DEV_BSIZE - 1);
	base = (base % PAGE_SIZE) / DEV_BSIZE;
	chunk = vm_page_dev_bsize_chunks[size / DEV_BSIZE];
	return (chunk << base) & VM_PAGE_BITS_ALL;
}

/*
 * set a page (partially) valid
 */
void
vm_page_set_valid(m, base, size)
	vm_page_t m;
	int base;
	int size;
{
	m->valid |= vm_page_bits(base, size);
}

/*
 * set a page (partially) invalid
 */
void
vm_page_set_invalid(m, base, size)
	vm_page_t m;
	int base;
	int size;
{
	int bits;

	m->valid &= ~(bits = vm_page_bits(base, size));
	if (m->valid == 0)
		m->dirty &= ~bits;
}

/*
 * is (partial) page valid?
 */
int
vm_page_is_valid(m, base, size)
	vm_page_t m;
	int base;
	int size;
{
	int bits;

	if (m->valid && ((m->valid & (bits = vm_page_bits(base, size))) == bits))
		return 1;
	else
		return 0;
}


/*
 * set a page (partially) dirty
 */
void
vm_page_set_dirty(m, base, size)
	vm_page_t m;
	int base;
	int size;
{
	if ((base != 0) || (size != PAGE_SIZE)) {
		if (pmap_is_modified(VM_PAGE_TO_PHYS(m))) {
			m->dirty = VM_PAGE_BITS_ALL;
			pmap_clear_modify(VM_PAGE_TO_PHYS(m));
			return;
		}
		m->dirty |= vm_page_bits(base, size);
	} else {
		m->dirty = VM_PAGE_BITS_ALL;
		pmap_clear_modify(VM_PAGE_TO_PHYS(m));
	}
}

void
vm_page_test_dirty(m)
	vm_page_t m;
{
	if ((m->dirty != VM_PAGE_BITS_ALL) &&
		pmap_is_modified(VM_PAGE_TO_PHYS(m))) {
		pmap_clear_modify(VM_PAGE_TO_PHYS(m));
		m->dirty = VM_PAGE_BITS_ALL;
	}
}

/*
 * set a page (partially) clean
 */
void
vm_page_set_clean(m, base, size)
	vm_page_t m;
	int base;
	int size;
{
	m->dirty &= ~vm_page_bits(base, size);
}

/*
 * is (partial) page clean?
 */
int
vm_page_is_clean(m, base, size)
	vm_page_t m;
	int base;
	int size;
{
	if (pmap_is_modified(VM_PAGE_TO_PHYS(m))) {
		m->dirty = VM_PAGE_BITS_ALL;
		pmap_clear_modify(VM_PAGE_TO_PHYS(m));
	}
	if ((m->dirty & m->valid & vm_page_bits(base, size)) == 0)
		return 1;
	else
		return 0;
}

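/*
 * Debugging aid: dump the paging-related counters to the console.
 */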
void
print_page_info()
{
	printf("cnt.v_free_count: %d\n", cnt.v_free_count);
	printf("cnt.v_cache_count: %d\n", cnt.v_cache_count);
	printf("cnt.v_inactive_count: %d\n", cnt.v_inactive_count);
	printf("cnt.v_active_count: %d\n", cnt.v_active_count);
	printf("cnt.v_wire_count: %d\n", cnt.v_wire_count);
	printf("cnt.v_free_reserved: %d\n", cnt.v_free_reserved);
	printf("cnt.v_free_min: %d\n", cnt.v_free_min);
	printf("cnt.v_free_target: %d\n", cnt.v_free_target);
	printf("cnt.v_cache_min: %d\n", cnt.v_cache_min);
	printf("cnt.v_inactive_target: %d\n", cnt.v_inactive_target);
}