/*
 * Copyright (c) 1991 Regents of the University of California.
 * All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * The Mach Operating System project at Carnegie-Mellon University.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	from: @(#)vm_page.c	7.4 (Berkeley) 5/7/91
 *	$Id: vm_page.c,v 1.29 1995/04/16 09:59:16 davidg Exp $
 */

/*
 * Copyright (c) 1987, 1990 Carnegie-Mellon University.
 * All rights reserved.
 *
 * Authors: Avadis Tevanian, Jr., Michael Wayne Young
 *
 * Permission to use, copy, modify and distribute this software and
 * its documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND
 * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie the
 * rights to redistribute these changes.
 */

/*
 *	Resident memory management module.
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/proc.h>

#include <vm/vm.h>
#include <vm/vm_kern.h>
#include <vm/vm_page.h>
#include <vm/vm_map.h>
#include <vm/vm_pageout.h>

/*
 *	Associated with each page of user-allocatable memory is a
 *	page structure.
 */

struct pglist *vm_page_buckets;	/* Array of buckets */
int vm_page_bucket_count = 0;	/* How big is array? */
int vm_page_hash_mask;		/* Mask for hash function */
simple_lock_data_t bucket_lock;	/* lock for all buckets XXX */

struct pglist vm_page_queue_free;
struct pglist vm_page_queue_active;
struct pglist vm_page_queue_inactive;
struct pglist vm_page_queue_cache;
simple_lock_data_t vm_page_queue_lock;
simple_lock_data_t vm_page_queue_free_lock;

/* has physical page allocation been initialized? */
boolean_t vm_page_startup_initialized;

vm_page_t vm_page_array;
int vm_page_array_size;
long first_page;
long last_page;
vm_offset_t first_phys_addr;
vm_offset_t last_phys_addr;
vm_size_t page_mask;
int page_shift;

/*
 * map of contiguous valid DEV_BSIZE chunks in a page
 * (this list is valid for page sizes up to 16*DEV_BSIZE)
 */
static u_short vm_page_dev_bsize_chunks[] = {
	0x0, 0x1, 0x3, 0x7, 0xf, 0x1f, 0x3f, 0x7f, 0xff,
	0x1ff, 0x3ff, 0x7ff, 0xfff, 0x1fff, 0x3fff, 0x7fff, 0xffff
};
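
/*
 * Note: entry i of the table above has its low i bits set, so
 * vm_page_dev_bsize_chunks[n] is the valid-bit mask covering the first
 * n DEV_BSIZE-sized blocks of a page (e.g. chunks[3] == 0x7 marks
 * blocks 0-2).  vm_page_bits() below shifts this mask into position
 * for a given base offset within the page.
 */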

/*
 *	vm_set_page_size:
 *
 *	Sets the page size, perhaps based upon the memory
 *	size.  Must be called before any use of page-size
 *	dependent functions.
 *
 *	Sets page_shift and page_mask from cnt.v_page_size.
 */
void
vm_set_page_size()
{

	if (cnt.v_page_size == 0)
		cnt.v_page_size = DEFAULT_PAGE_SIZE;
	page_mask = cnt.v_page_size - 1;
	if ((page_mask & cnt.v_page_size) != 0)
		panic("vm_set_page_size: page size not a power of two");
	for (page_shift = 0;; page_shift++)
		if ((1 << page_shift) == cnt.v_page_size)
			break;
}
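
/*
 * For example, with a cnt.v_page_size of 4096 this leaves page_mask ==
 * 0xfff and page_shift == 12, and the power-of-two check holds because
 * 4095 & 4096 == 0.
 */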

/*
 *	vm_page_startup:
 *
 *	Initializes the resident memory module.
 *
 *	Allocates memory for the page cells, and
 *	for the object/offset-to-page hash table headers.
 *	Each page cell is initialized and placed on the free list.
 */

vm_offset_t
vm_page_startup(starta, enda, vaddr)
	register vm_offset_t starta;
	vm_offset_t enda;
	register vm_offset_t vaddr;
{
	register vm_offset_t mapped;
	register vm_page_t m;
	register struct pglist *bucket;
	vm_size_t npages, page_range;
	register vm_offset_t new_start;
	int i;
	vm_offset_t pa;
	int nblocks;
	vm_offset_t first_managed_page;

	/* the biggest memory array is the second group of pages */
	vm_offset_t start;
	vm_offset_t biggestone, biggestsize;

	vm_offset_t total;

	total = 0;
	biggestsize = 0;
	biggestone = 0;
	nblocks = 0;
	vaddr = round_page(vaddr);

	for (i = 0; phys_avail[i + 1]; i += 2) {
		phys_avail[i] = round_page(phys_avail[i]);
		phys_avail[i + 1] = trunc_page(phys_avail[i + 1]);
	}

	for (i = 0; phys_avail[i + 1]; i += 2) {
		int size = phys_avail[i + 1] - phys_avail[i];

		if (size > biggestsize) {
			biggestone = i;
			biggestsize = size;
		}
		++nblocks;
		total += size;
	}

	start = phys_avail[biggestone];

	/*
	 * Initialize the locks
	 */

	simple_lock_init(&vm_page_queue_free_lock);
	simple_lock_init(&vm_page_queue_lock);

	/*
	 * Initialize the queue headers for the free, active, inactive and
	 * cache queues.
	 */

	TAILQ_INIT(&vm_page_queue_free);
	TAILQ_INIT(&vm_page_queue_active);
	TAILQ_INIT(&vm_page_queue_inactive);
	TAILQ_INIT(&vm_page_queue_cache);

	/*
	 * Allocate (and initialize) the hash table buckets.
	 *
	 * The number of buckets MUST BE a power of 2, and the actual value is
	 * the next power of 2 greater than the number of physical pages in
	 * the system.
	 *
	 * Note: This computation can be tweaked if desired.
	 */
	vm_page_buckets = (struct pglist *) vaddr;
	bucket = vm_page_buckets;
	if (vm_page_bucket_count == 0) {
		vm_page_bucket_count = 1;
		while (vm_page_bucket_count < atop(total))
			vm_page_bucket_count <<= 1;
	}
	vm_page_hash_mask = vm_page_bucket_count - 1;

	/*
	 * Validate these addresses.
	 */

	new_start = start + vm_page_bucket_count * sizeof(struct pglist);
	new_start = round_page(new_start);
	mapped = vaddr;
	vaddr = pmap_map(mapped, start, new_start,
	    VM_PROT_READ | VM_PROT_WRITE);
	start = new_start;
	bzero((caddr_t) mapped, vaddr - mapped);
	mapped = vaddr;

	for (i = 0; i < vm_page_bucket_count; i++) {
		TAILQ_INIT(bucket);
		bucket++;
	}

	simple_lock_init(&bucket_lock);

	/*
	 * round (or truncate) the addresses to our page size.
	 */

	/*
	 * Pre-allocate maps and map entries that cannot be dynamically
	 * allocated via malloc().  The maps include the kernel_map and
	 * kmem_map which must be initialized before malloc() will work
	 * (obviously).  Also could include pager maps which would be
	 * allocated before kmeminit.
	 *
	 * Allow some kernel map entries... this should be plenty since people
	 * shouldn't be cluttering up the kernel map (they should use their
	 * own maps).
	 */

	kentry_data_size = MAX_KMAP * sizeof(struct vm_map) +
	    MAX_KMAPENT * sizeof(struct vm_map_entry);
	kentry_data_size = round_page(kentry_data_size);
	kentry_data = (vm_offset_t) vaddr;
	vaddr += kentry_data_size;

	/*
	 * Validate these zone addresses.
	 */

	new_start = start + (vaddr - mapped);
	pmap_map(mapped, start, new_start, VM_PROT_READ | VM_PROT_WRITE);
	bzero((caddr_t) mapped, (vaddr - mapped));
	start = round_page(new_start);

	/*
	 * Compute the number of pages of memory that will be available for
	 * use (taking into account the overhead of a page structure per
	 * page).
	 */

	first_page = phys_avail[0] / PAGE_SIZE;

	/* for VM_PAGE_CHECK() */
	last_page = phys_avail[(nblocks - 1) * 2 + 1] / PAGE_SIZE;

	page_range = last_page - (phys_avail[0] / PAGE_SIZE);
	npages = (total - (page_range * sizeof(struct vm_page)) -
	    (start - phys_avail[biggestone])) / PAGE_SIZE;

	/*
	 * Initialize the mem entry structures now, and put them in the free
	 * queue.
	 */

	vm_page_array = (vm_page_t) vaddr;
	mapped = vaddr;

	/*
	 * Validate these addresses.
	 */

	new_start = round_page(start + page_range * sizeof(struct vm_page));
	mapped = pmap_map(mapped, start, new_start,
	    VM_PROT_READ | VM_PROT_WRITE);
	start = new_start;

	first_managed_page = start / PAGE_SIZE;

	/*
	 * Clear all of the page structures
	 */
	bzero((caddr_t) vm_page_array, page_range * sizeof(struct vm_page));
	vm_page_array_size = page_range;

	cnt.v_page_count = 0;
	cnt.v_free_count = 0;
	for (i = 0; phys_avail[i + 1] && npages > 0; i += 2) {
		if (i == biggestone)
			pa = ptoa(first_managed_page);
		else
			pa = phys_avail[i];
		while (pa < phys_avail[i + 1] && npages-- > 0) {
			++cnt.v_page_count;
			++cnt.v_free_count;
			m = PHYS_TO_VM_PAGE(pa);
			m->flags = PG_FREE;
			m->phys_addr = pa;
			TAILQ_INSERT_TAIL(&vm_page_queue_free, m, pageq);
			pa += PAGE_SIZE;
		}
	}

	/*
	 * Initialize vm_pages_needed lock here - don't wait for pageout
	 * daemon	XXX
	 */
	simple_lock_init(&vm_pages_needed_lock);

	return (mapped);
}

/*
 *	vm_page_hash:
 *
 *	Distributes the object/offset key pair among hash buckets.
 *
 *	NOTE:  This function depends on vm_page_bucket_count being a power of 2.
 */
inline int
vm_page_hash(object, offset)
	vm_object_t object;
	vm_offset_t offset;
{
	return ((unsigned) object + offset / NBPG) & vm_page_hash_mask;
}
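
/*
 * Worked example (values assumed for illustration only): with
 * vm_page_bucket_count == 1024, vm_page_hash_mask is 0x3ff; an object
 * at 0xf0211000 with offset 0x2000 and NBPG == 4096 hashes to
 * (0xf0211000 + 2) & 0x3ff == 0x2, so the page lands in bucket 2.
 */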

/*
 *	vm_page_insert:		[ internal use only ]
 *
 *	Inserts the given mem entry into the object/object-page
 *	table and object list.
 *
 *	The object and page must be locked, and must be splhigh.
 */

inline void
vm_page_insert(mem, object, offset)
	register vm_page_t mem;
	register vm_object_t object;
	register vm_offset_t offset;
{
	register struct pglist *bucket;

	VM_PAGE_CHECK(mem);

	if (mem->flags & PG_TABLED)
		panic("vm_page_insert: already inserted");

	/*
	 * Record the object/offset pair in this page
	 */

	mem->object = object;
	mem->offset = offset;

	/*
	 * Insert it into the object_object/offset hash table
	 */

	bucket = &vm_page_buckets[vm_page_hash(object, offset)];
	simple_lock(&bucket_lock);
	TAILQ_INSERT_TAIL(bucket, mem, hashq);
	simple_unlock(&bucket_lock);

	/*
	 * Now link into the object's list of backed pages.
	 */

	TAILQ_INSERT_TAIL(&object->memq, mem, listq);
	mem->flags |= PG_TABLED;

	/*
	 * And show that the object has one more resident page.
	 */

	object->resident_page_count++;
}

/*
 *	vm_page_remove:		[ internal use only ]
 *				NOTE: used by device pager as well -wfj
 *
 *	Removes the given mem entry from the object/offset-page
 *	table and the object page list.
 *
 *	The object and page must be locked, and at splhigh.
 */

inline void
vm_page_remove(mem)
	register vm_page_t mem;
{
	register struct pglist *bucket;

	VM_PAGE_CHECK(mem);

	if (!(mem->flags & PG_TABLED))
		return;

	/*
	 * Remove from the object_object/offset hash table
	 */

	bucket = &vm_page_buckets[vm_page_hash(mem->object, mem->offset)];
	simple_lock(&bucket_lock);
	TAILQ_REMOVE(bucket, mem, hashq);
	simple_unlock(&bucket_lock);

	/*
	 * Now remove from the object's list of backed pages.
	 */

	TAILQ_REMOVE(&mem->object->memq, mem, listq);

	/*
	 * And show that the object has one fewer resident page.
	 */

	mem->object->resident_page_count--;

	mem->flags &= ~PG_TABLED;
}

/*
 *	vm_page_lookup:
 *
 *	Returns the page associated with the object/offset
 *	pair specified; if none is found, NULL is returned.
 *
 *	The object must be locked.  No side effects.
 */

vm_page_t
vm_page_lookup(object, offset)
	register vm_object_t object;
	register vm_offset_t offset;
{
	register vm_page_t mem;
	register struct pglist *bucket;
	int s;

	/*
	 * Search the hash table for this object/offset pair
	 */

	bucket = &vm_page_buckets[vm_page_hash(object, offset)];

	s = splhigh();
	simple_lock(&bucket_lock);
	for (mem = bucket->tqh_first; mem != NULL; mem = mem->hashq.tqe_next) {
		VM_PAGE_CHECK(mem);
		if ((mem->object == object) && (mem->offset == offset)) {
			simple_unlock(&bucket_lock);
			splx(s);
			return (mem);
		}
	}

	simple_unlock(&bucket_lock);
	splx(s);
	return (NULL);
}

/*
 *	vm_page_rename:
 *
 *	Move the given memory entry from its
 *	current object to the specified target object/offset.
 *
 *	The object must be locked.
 */
void
vm_page_rename(mem, new_object, new_offset)
	register vm_page_t mem;
	register vm_object_t new_object;
	vm_offset_t new_offset;
{
	int s;

	if (mem->object == new_object)
		return;

	vm_page_lock_queues(); /* keep page from moving out from under pageout daemon */
	s = splhigh();
	vm_page_remove(mem);
	vm_page_insert(mem, new_object, new_offset);
	splx(s);
	vm_page_unlock_queues();
}

/*
 * vm_page_unqueue must be called at splhigh();
 */
inline void
vm_page_unqueue(vm_page_t mem)
{
	int origflags;

	origflags = mem->flags;

	if ((origflags & (PG_ACTIVE|PG_INACTIVE|PG_CACHE)) == 0)
		return;

	if (origflags & PG_ACTIVE) {
		TAILQ_REMOVE(&vm_page_queue_active, mem, pageq);
		cnt.v_active_count--;
		mem->flags &= ~PG_ACTIVE;
	} else if (origflags & PG_INACTIVE) {
		TAILQ_REMOVE(&vm_page_queue_inactive, mem, pageq);
		cnt.v_inactive_count--;
		mem->flags &= ~PG_INACTIVE;
	} else if (origflags & PG_CACHE) {
		TAILQ_REMOVE(&vm_page_queue_cache, mem, pageq);
		cnt.v_cache_count--;
		mem->flags &= ~PG_CACHE;
		if (cnt.v_cache_count + cnt.v_free_count < cnt.v_free_reserved)
			pagedaemon_wakeup();
	}
	return;
}

/*
 *	vm_page_alloc:
 *
 *	Allocate and return a memory cell associated
 *	with this VM object/offset pair.
 *
 *	page_req classes:
 *	VM_ALLOC_NORMAL		normal process request
 *	VM_ALLOC_SYSTEM		system *really* needs a page
 *	VM_ALLOC_INTERRUPT	interrupt time request
 *
 *	Object must be locked.
 */
vm_page_t
vm_page_alloc(object, offset, page_req)
	vm_object_t object;
	vm_offset_t offset;
	int page_req;
{
	register vm_page_t mem;
	int s;

	if ((curproc == pageproc) && (page_req != VM_ALLOC_INTERRUPT)) {
		page_req = VM_ALLOC_SYSTEM;
	}

	simple_lock(&vm_page_queue_free_lock);

	s = splhigh();

	mem = vm_page_queue_free.tqh_first;

	switch (page_req) {
	case VM_ALLOC_NORMAL:
		if (cnt.v_free_count >= cnt.v_free_reserved) {
			TAILQ_REMOVE(&vm_page_queue_free, mem, pageq);
			cnt.v_free_count--;
		} else {
			mem = vm_page_queue_cache.tqh_first;
			if (mem != NULL) {
				TAILQ_REMOVE(&vm_page_queue_cache, mem, pageq);
				vm_page_remove(mem);
				cnt.v_cache_count--;
			} else {
				simple_unlock(&vm_page_queue_free_lock);
				splx(s);
				pagedaemon_wakeup();
				return (NULL);
			}
		}
		break;

	case VM_ALLOC_SYSTEM:
		if ((cnt.v_free_count >= cnt.v_free_reserved) ||
		    ((cnt.v_cache_count == 0) &&
		    (cnt.v_free_count >= cnt.v_interrupt_free_min))) {
			TAILQ_REMOVE(&vm_page_queue_free, mem, pageq);
			cnt.v_free_count--;
		} else {
			mem = vm_page_queue_cache.tqh_first;
			if (mem != NULL) {
				TAILQ_REMOVE(&vm_page_queue_cache, mem, pageq);
				vm_page_remove(mem);
				cnt.v_cache_count--;
			} else {
				simple_unlock(&vm_page_queue_free_lock);
				splx(s);
				pagedaemon_wakeup();
				return (NULL);
			}
		}
		break;

	case VM_ALLOC_INTERRUPT:
		if (mem != NULL) {
			TAILQ_REMOVE(&vm_page_queue_free, mem, pageq);
			cnt.v_free_count--;
		} else {
			simple_unlock(&vm_page_queue_free_lock);
			splx(s);
			pagedaemon_wakeup();
			return (NULL);
		}
		break;

	default:
		panic("vm_page_alloc: invalid allocation class");
	}

	simple_unlock(&vm_page_queue_free_lock);

	mem->flags = PG_BUSY;
	mem->wire_count = 0;
	mem->hold_count = 0;
	mem->act_count = 0;
	mem->busy = 0;
	mem->valid = 0;
	mem->dirty = 0;
	mem->bmapped = 0;

	/* XXX before splx until vm_page_insert is safe */
	vm_page_insert(mem, object, offset);

	splx(s);

	/*
	 * Don't wakeup too often - wakeup the pageout daemon when
	 * we would be nearly out of memory.
	 */
	if (((cnt.v_free_count + cnt.v_cache_count) < cnt.v_free_min) ||
	    (cnt.v_free_count < cnt.v_pageout_free_min))
		pagedaemon_wakeup();

	return (mem);
}
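
/*
 * Typical use (illustrative sketch only): a pager allocates a page for
 * a given object/offset and, on failure, blocks until memory frees up:
 *
 *	while ((m = vm_page_alloc(object, offset, VM_ALLOC_NORMAL)) == NULL)
 *		VM_WAIT;
 *
 * VM_WAIT here stands for whatever wait-for-free-pages mechanism the
 * caller uses; it is not defined in this file.
 */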

vm_offset_t
vm_page_alloc_contig(size, low, high, alignment)
	vm_offset_t size;
	vm_offset_t low;
	vm_offset_t high;
	vm_offset_t alignment;
{
	int i, s, start;
	vm_offset_t addr, phys, tmp_addr;
	vm_page_t pga = vm_page_array;

	if ((alignment & (alignment - 1)) != 0)
		panic("vm_page_alloc_contig: alignment must be a power of 2");

	start = 0;
	s = splhigh();
again:
	/*
	 * Find first page in array that is free, within range, and aligned.
	 */
	for (i = start; i < cnt.v_page_count; i++) {
		phys = VM_PAGE_TO_PHYS(&pga[i]);
		if (((pga[i].flags & PG_FREE) == PG_FREE) &&
		    (phys >= low) && (phys < high) &&
		    ((phys & (alignment - 1)) == 0))
			break;
	}

	/*
	 * If the above failed or we will exceed the upper bound, fail.
	 */
	if ((i == cnt.v_page_count) || ((VM_PAGE_TO_PHYS(&pga[i]) + size) > high)) {
		splx(s);
		return (NULL);
	}
	start = i;

	/*
	 * Check successive pages for contiguous and free.
	 */
	for (i = start + 1; i < (start + size / PAGE_SIZE); i++) {
		if ((VM_PAGE_TO_PHYS(&pga[i]) !=
			(VM_PAGE_TO_PHYS(&pga[i - 1]) + PAGE_SIZE)) ||
		    ((pga[i].flags & PG_FREE) != PG_FREE)) {
			start++;
			goto again;
		}
	}

	/*
	 * We've found a contiguous chunk that meets our requirements.
	 * Allocate kernel VM, unfree and assign the physical pages to it and
	 * return kernel VM pointer.
	 */
	tmp_addr = addr = kmem_alloc_pageable(kernel_map, size);

	for (i = start; i < (start + size / PAGE_SIZE); i++) {
		vm_page_t m = &pga[i];

		TAILQ_REMOVE(&vm_page_queue_free, m, pageq);
		cnt.v_free_count--;
		m->valid = VM_PAGE_BITS_ALL;
		m->flags = 0;
		m->dirty = 0;
		m->wire_count = 0;
		m->act_count = 0;
		m->bmapped = 0;
		m->busy = 0;
		vm_page_insert(m, kernel_object, tmp_addr - VM_MIN_KERNEL_ADDRESS);
		vm_page_wire(m);
		pmap_kenter(tmp_addr, VM_PAGE_TO_PHYS(m));
		tmp_addr += PAGE_SIZE;
	}

	splx(s);
	return (addr);
}
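
/*
 * Illustrative call (parameter values assumed, not taken from this
 * file): an ISA DMA buffer must lie below 16MB and stay aligned to the
 * device's boundary, so a driver might request
 *
 *	addr = vm_page_alloc_contig(64 * 1024, 0, 16 * 1024 * 1024,
 *	    64 * 1024);
 *
 * which returns a kernel virtual address backed by physically
 * contiguous, wired pages, or NULL on failure.
 */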

/*
 *	vm_page_free:
 *
 *	Returns the given page to the free list,
 *	disassociating it from any VM object.
 *
 *	Object and page must be locked prior to entry.
 */
void
vm_page_free(mem)
	register vm_page_t mem;
{
	int s;
	int flags;

	s = splhigh();
	vm_page_remove(mem);
	vm_page_unqueue(mem);

	flags = mem->flags;
	if (mem->bmapped || mem->busy || flags & (PG_BUSY|PG_FREE)) {
		if (flags & PG_FREE)
			panic("vm_page_free: freeing free page");
		printf("vm_page_free: offset(%d), bmapped(%d), busy(%d), PG_BUSY(%d)\n",
		    mem->offset, mem->bmapped, mem->busy, (flags & PG_BUSY) ? 1 : 0);
		panic("vm_page_free: freeing busy page");
	}

	if ((flags & PG_WANTED) != 0)
		wakeup((caddr_t) mem);
	if ((flags & PG_FICTITIOUS) == 0) {

		simple_lock(&vm_page_queue_free_lock);
		if (mem->wire_count) {
			if (mem->wire_count > 1) {
				printf("vm_page_free: wire count > 1 (%d)", mem->wire_count);
				panic("vm_page_free: invalid wire count");
			}
			cnt.v_wire_count--;
			mem->wire_count = 0;
		}
		mem->flags |= PG_FREE;
		TAILQ_INSERT_TAIL(&vm_page_queue_free, mem, pageq);

		simple_unlock(&vm_page_queue_free_lock);
		splx(s);
		/*
		 * if pageout daemon needs pages, then tell it that there are
		 * some free.
		 */
		if (vm_pageout_pages_needed) {
			wakeup((caddr_t) &vm_pageout_pages_needed);
			vm_pageout_pages_needed = 0;
		}

		cnt.v_free_count++;
		/*
		 * Wakeup processes that are waiting on memory if we hit a
		 * high water mark, and wakeup the scheduler process if we
		 * have lots of memory; it will swap in processes.
		 */
		if ((cnt.v_free_count + cnt.v_cache_count) == cnt.v_free_min) {
			wakeup((caddr_t) &cnt.v_free_count);
			wakeup((caddr_t) &proc0);
		}
	} else {
		splx(s);
	}
	cnt.v_tfree++;
}

/*
 *	vm_page_wire:
 *
 *	Mark this page as wired down by yet
 *	another map, removing it from paging queues
 *	as necessary.
 *
 *	The page queues must be locked.
 */
void
vm_page_wire(mem)
	register vm_page_t mem;
{
	int s;

	VM_PAGE_CHECK(mem);

	if (mem->wire_count == 0) {
		s = splhigh();
		vm_page_unqueue(mem);
		splx(s);
		cnt.v_wire_count++;
	}
	mem->flags |= PG_WRITEABLE|PG_MAPPED;
	mem->wire_count++;
}

/*
 *	vm_page_unwire:
 *
 *	Release one wiring of this page, potentially
 *	enabling it to be paged again.
 *
 *	The page queues must be locked.
 */
void
vm_page_unwire(mem)
	register vm_page_t mem;
{
	int s;

	VM_PAGE_CHECK(mem);

	s = splhigh();

	if (mem->wire_count)
		mem->wire_count--;
	if (mem->wire_count == 0) {
		TAILQ_INSERT_TAIL(&vm_page_queue_active, mem, pageq);
		cnt.v_active_count++;
		mem->flags |= PG_ACTIVE;
		cnt.v_wire_count--;
	}
	splx(s);
}

/*
 *	vm_page_activate:
 *
 *	Put the specified page on the active list (if appropriate).
 *
 *	The page queues must be locked.
 */
void
vm_page_activate(m)
	register vm_page_t m;
{
	int s;

	VM_PAGE_CHECK(m);

	s = splhigh();
	if (m->flags & PG_ACTIVE)
		panic("vm_page_activate: already active");

	if (m->flags & PG_CACHE)
		cnt.v_reactivated++;

	vm_page_unqueue(m);

	if (m->wire_count == 0) {
		TAILQ_INSERT_TAIL(&vm_page_queue_active, m, pageq);
		m->flags |= PG_ACTIVE;
		if (m->act_count < 5)
			m->act_count = 5;
		else if (m->act_count < ACT_MAX)
			m->act_count += 1;
		cnt.v_active_count++;
	}
	splx(s);
}

/*
 *	vm_page_deactivate:
 *
 *	Returns the given page to the inactive list,
 *	indicating that no physical maps have access
 *	to this page.  [Used by the physical mapping system.]
 *
 *	The page queues must be locked.
 */
void
vm_page_deactivate(m)
	register vm_page_t m;
{
	int spl;

	VM_PAGE_CHECK(m);

	/*
	 * Only move active pages -- ignore locked or already inactive ones.
	 *
	 * XXX: sometimes we get pages which aren't wired down or on any queue -
	 * we need to put them on the inactive queue also, otherwise we lose
	 * track of them. Paul Mackerras (paulus@cs.anu.edu.au) 9-Jan-93.
	 */

	spl = splhigh();
	if (!(m->flags & PG_INACTIVE) && m->wire_count == 0 &&
	    m->hold_count == 0) {
		pmap_clear_reference(VM_PAGE_TO_PHYS(m));
		if (m->flags & PG_CACHE)
			cnt.v_reactivated++;
		vm_page_unqueue(m);
		TAILQ_INSERT_TAIL(&vm_page_queue_inactive, m, pageq);
		m->flags |= PG_INACTIVE;
		cnt.v_inactive_count++;
		m->act_count = 0;
	}
	splx(spl);
}

/*
 * vm_page_cache
 *
 * Put the specified page onto the page cache queue (if appropriate).
 */
void
vm_page_cache(m)
	register vm_page_t m;
{
	int s;

	VM_PAGE_CHECK(m);
	if ((m->flags & (PG_CACHE | PG_BUSY)) || m->busy || m->wire_count ||
	    m->bmapped)
		return;

	s = splhigh();
	vm_page_unqueue(m);
	vm_page_protect(m, VM_PROT_NONE);

	TAILQ_INSERT_TAIL(&vm_page_queue_cache, m, pageq);
	m->flags |= PG_CACHE;
	cnt.v_cache_count++;
	if ((cnt.v_free_count + cnt.v_cache_count) == cnt.v_free_min) {
		wakeup((caddr_t) &cnt.v_free_count);
		wakeup((caddr_t) &proc0);
	}
	if (vm_pageout_pages_needed) {
		wakeup((caddr_t) &vm_pageout_pages_needed);
		vm_pageout_pages_needed = 0;
	}

	splx(s);
}

/*
 *	vm_page_zero_fill:
 *
 *	Zero-fill the specified page.
 *	Written as a standard pagein routine, to
 *	be used by the zero-fill object.
 */
boolean_t
vm_page_zero_fill(m)
	vm_page_t m;
{
	VM_PAGE_CHECK(m);

	pmap_zero_page(VM_PAGE_TO_PHYS(m));
	m->valid = VM_PAGE_BITS_ALL;
	return (TRUE);
}

/*
 *	vm_page_copy:
 *
 *	Copy one page to another
 */
void
vm_page_copy(src_m, dest_m)
	vm_page_t src_m;
	vm_page_t dest_m;
{
	VM_PAGE_CHECK(src_m);
	VM_PAGE_CHECK(dest_m);

	pmap_copy_page(VM_PAGE_TO_PHYS(src_m), VM_PAGE_TO_PHYS(dest_m));
	dest_m->valid = VM_PAGE_BITS_ALL;
}

/*
 * mapping function for valid bits or for dirty bits in
 * a page
 */
inline int
vm_page_bits(int base, int size)
{
	u_short chunk;

	if ((base == 0) && (size >= PAGE_SIZE))
		return VM_PAGE_BITS_ALL;
	size = (size + DEV_BSIZE - 1) & ~(DEV_BSIZE - 1);
	base = (base % PAGE_SIZE) / DEV_BSIZE;
	chunk = vm_page_dev_bsize_chunks[size / DEV_BSIZE];
	return (chunk << base) & VM_PAGE_BITS_ALL;
}
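
/*
 * Worked example (DEV_BSIZE == 512 and PAGE_SIZE == 4096 assumed):
 * vm_page_bits(512, 1024) rounds size up to 1024, computes base == 1
 * and chunk == vm_page_dev_bsize_chunks[2] == 0x3, and returns
 * 0x3 << 1 == 0x6, i.e. the bits for the second and third DEV_BSIZE
 * blocks of the page.
 */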

/*
 * set a page (partially) valid
 */
void
vm_page_set_valid(m, base, size)
	vm_page_t m;
	int base;
	int size;
{
	m->valid |= vm_page_bits(base, size);
}

/*
 * set a page (partially) invalid
 */
void
vm_page_set_invalid(m, base, size)
	vm_page_t m;
	int base;
	int size;
{
	int bits;

	m->valid &= ~(bits = vm_page_bits(base, size));
	if (m->valid == 0)
		m->dirty &= ~bits;
}

/*
 * is (partial) page valid?
 */
int
vm_page_is_valid(m, base, size)
	vm_page_t m;
	int base;
	int size;
{
	int bits = vm_page_bits(base, size);

	if (m->valid && ((m->valid & bits) == bits))
		return 1;
	else
		return 0;
}

/*
 * set a page (partially) dirty
 */
void
vm_page_set_dirty(m, base, size)
	vm_page_t m;
	int base;
	int size;
{
	if ((base != 0) || (size != PAGE_SIZE)) {
		if (pmap_is_modified(VM_PAGE_TO_PHYS(m))) {
			m->dirty = VM_PAGE_BITS_ALL;
			pmap_clear_modify(VM_PAGE_TO_PHYS(m));
			return;
		}
		m->dirty |= vm_page_bits(base, size);
	} else {
		m->dirty = VM_PAGE_BITS_ALL;
		pmap_clear_modify(VM_PAGE_TO_PHYS(m));
	}
}

void
vm_page_test_dirty(m)
	vm_page_t m;
{
	if ((m->dirty != VM_PAGE_BITS_ALL) &&
	    pmap_is_modified(VM_PAGE_TO_PHYS(m))) {
		m->dirty = VM_PAGE_BITS_ALL;
	}
}

/*
 * set a page (partially) clean
 */
void
vm_page_set_clean(m, base, size)
	vm_page_t m;
	int base;
	int size;
{
	m->dirty &= ~vm_page_bits(base, size);
	if (base == 0 && size == PAGE_SIZE)
		pmap_clear_modify(VM_PAGE_TO_PHYS(m));
}

/*
 * is (partial) page clean?
 */
int
vm_page_is_clean(m, base, size)
	vm_page_t m;
	int base;
	int size;
{
	if (pmap_is_modified(VM_PAGE_TO_PHYS(m))) {
		m->dirty = VM_PAGE_BITS_ALL;
		pmap_clear_modify(VM_PAGE_TO_PHYS(m));
	}
	if ((m->dirty & m->valid & vm_page_bits(base, size)) == 0)
		return 1;
	else
		return 0;
}

#ifdef DDB
void
print_page_info()
{
	printf("cnt.v_free_count: %d\n", cnt.v_free_count);
	printf("cnt.v_cache_count: %d\n", cnt.v_cache_count);
	printf("cnt.v_inactive_count: %d\n", cnt.v_inactive_count);
	printf("cnt.v_active_count: %d\n", cnt.v_active_count);
	printf("cnt.v_wire_count: %d\n", cnt.v_wire_count);
	printf("cnt.v_free_reserved: %d\n", cnt.v_free_reserved);
	printf("cnt.v_free_min: %d\n", cnt.v_free_min);
	printf("cnt.v_free_target: %d\n", cnt.v_free_target);
	printf("cnt.v_cache_min: %d\n", cnt.v_cache_min);
	printf("cnt.v_inactive_target: %d\n", cnt.v_inactive_target);
}
#endif
1176