/*	$OpenBSD: uvm_page.c,v 1.65 2008/04/09 16:58:11 deraadt Exp $	*/
/*	$NetBSD: uvm_page.c,v 1.44 2000/11/27 08:40:04 chs Exp $	*/

/*
 * Copyright (c) 1997 Charles D. Cranor and Washington University.
 * Copyright (c) 1991, 1993, The Regents of the University of California.
 *
 * All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * The Mach Operating System project at Carnegie-Mellon University.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by Charles D. Cranor,
 *      Washington University, the University of California, Berkeley and
 *      its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)vm_page.c   8.3 (Berkeley) 3/21/94
 * from: Id: uvm_page.c,v 1.1.2.18 1998/02/06 05:24:42 chs Exp
 *
 *
 * Copyright (c) 1987, 1990 Carnegie-Mellon University.
 * All rights reserved.
 *
 * Permission to use, copy, modify and distribute this software and
 * its documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND
 * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie the
 * rights to redistribute these changes.
 */

/*
 * uvm_page.c: page ops.
 */

#define UVM_PAGE                /* pull in uvm_page.h functions */
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/malloc.h>
#include <sys/sched.h>
#include <sys/kernel.h>
#include <sys/vnode.h>

#include <uvm/uvm.h>

/*
 * global vars... XXXCDC: move to uvm. structure.
 */

/*
 * physical memory config is stored in vm_physmem.
 */

struct vm_physseg vm_physmem[VM_PHYSSEG_MAX];	/* XXXCDC: uvm.physmem */
int vm_nphysseg = 0;				/* XXXCDC: uvm.nphysseg */

/*
 * Some supported CPUs in a given architecture don't support all
 * of the things necessary to do idle page zero'ing efficiently.
 * We therefore provide a way to disable it from machdep code here.
 */

/*
 * XXX disabled until we can find a way to do this without causing
 * problems for either cpu caches or DMA latency.
 */
boolean_t vm_page_zero_enable = FALSE;

/*
 * local variables
 */

/*
 * these variables record the values returned by vm_page_bootstrap,
 * for debugging purposes.  The implementation of uvm_pageboot_alloc
 * and pmap_startup here also uses them internally.
 */

static vaddr_t      virtual_space_start;
static vaddr_t      virtual_space_end;

/*
 * we use a hash table with only one bucket during bootup.  we will
 * later rehash (resize) the hash table once the allocator is ready.
 * we statically allocate the one bootstrap bucket below...
 */

static struct pglist uvm_bootbucket;

/*
 * History
 */
UVMHIST_DECL(pghist);

/*
 * local prototypes
 */

static void uvm_pageinsert(struct vm_page *);
static void uvm_pageremove(struct vm_page *);

/*
 * inline functions
 */

/*
 * uvm_pageinsert: insert a page in the object and the hash table
 *
 * => caller must lock object
 * => caller must lock page queues
 * => caller should have already set pg's object and offset pointers
 *    and bumped the version counter
 */

__inline static void
uvm_pageinsert(struct vm_page *pg)
{
	struct pglist *buck;
	int s;
	UVMHIST_FUNC("uvm_pageinsert"); UVMHIST_CALLED(pghist);

	KASSERT((pg->pg_flags & PG_TABLED) == 0);
	buck = &uvm.page_hash[uvm_pagehash(pg->uobject,pg->offset)];
	s = splvm();
	simple_lock(&uvm.hashlock);
	TAILQ_INSERT_TAIL(buck, pg, hashq);	/* put in hash */
	simple_unlock(&uvm.hashlock);
	splx(s);

	TAILQ_INSERT_TAIL(&pg->uobject->memq, pg, listq); /* put in object */
	atomic_setbits_int(&pg->pg_flags, PG_TABLED);
	pg->uobject->uo_npages++;
}

/*
 * uvm_pageremove: remove page from object and hash
 *
 * => caller must lock object
 * => caller must lock page queues
 */

static __inline void
uvm_pageremove(struct vm_page *pg)
{
	struct pglist *buck;
	int s;
	UVMHIST_FUNC("uvm_pageremove"); UVMHIST_CALLED(pghist);

	KASSERT(pg->pg_flags & PG_TABLED);
	buck = &uvm.page_hash[uvm_pagehash(pg->uobject,pg->offset)];
	s = splvm();
	simple_lock(&uvm.hashlock);
	TAILQ_REMOVE(buck, pg, hashq);
	simple_unlock(&uvm.hashlock);
	splx(s);

#ifdef UBC
	if (pg->uobject->pgops == &uvm_vnodeops) {
		uvm_pgcnt_vnode--;
	}
#endif

	/* object should be locked */
	TAILQ_REMOVE(&pg->uobject->memq, pg, listq);

	atomic_clearbits_int(&pg->pg_flags, PG_TABLED);
	pg->uobject->uo_npages--;
	pg->uobject = NULL;
	pg->pg_version++;
}

/*
 * uvm_page_init: init the page system.   called from uvm_init().
 *
 * => we return the range of kernel virtual memory in kvm_startp/kvm_endp
 */

void
uvm_page_init(kvm_startp, kvm_endp)
	vaddr_t *kvm_startp, *kvm_endp;
{
	vsize_t freepages, pagecount, n;
	vm_page_t pagearray;
	int lcv, i;
	paddr_t paddr;
#if defined(UVMHIST)
	static struct uvm_history_ent pghistbuf[100];
#endif

	UVMHIST_FUNC("uvm_page_init");
	UVMHIST_INIT_STATIC(pghist, pghistbuf);
	UVMHIST_CALLED(pghist);

	/*
	 * init the page queues and page queue locks
	 */

	for (lcv = 0; lcv < VM_NFREELIST; lcv++) {
		for (i = 0; i < PGFL_NQUEUES; i++)
			TAILQ_INIT(&uvm.page_free[lcv].pgfl_queues[i]);
	}
	TAILQ_INIT(&uvm.page_active);
	TAILQ_INIT(&uvm.page_inactive_swp);
	TAILQ_INIT(&uvm.page_inactive_obj);
	simple_lock_init(&uvm.pageqlock);
	mtx_init(&uvm.fpageqlock, IPL_VM);

	/*
	 * init the <obj,offset> => <page> hash table.  for now
	 * we just have one bucket (the bootstrap bucket).  later on we
	 * will allocate new buckets as we dynamically resize the hash table.
	 */

	uvm.page_nhash = 1;			/* 1 bucket */
	uvm.page_hashmask = 0;			/* mask for hash function */
	uvm.page_hash = &uvm_bootbucket;	/* install bootstrap bucket */
	TAILQ_INIT(uvm.page_hash);		/* init hash table */
	simple_lock_init(&uvm.hashlock);	/* init hash table lock */

	/*
	 * allocate vm_page structures.
	 */

	/*
	 * sanity check:
	 * before calling this function the MD code is expected to register
	 * some free RAM with the uvm_page_physload() function.   our job
	 * now is to allocate vm_page structures for this memory.
	 */

	if (vm_nphysseg == 0)
		panic("uvm_page_init: no memory pre-allocated");

	/*
	 * first calculate the number of free pages...
	 *
	 * note that we use start/end rather than avail_start/avail_end.
	 * this allows us to allocate extra vm_page structures in case we
	 * want to return some memory to the pool after booting.
	 */

	freepages = 0;
	for (lcv = 0 ; lcv < vm_nphysseg ; lcv++)
		freepages += (vm_physmem[lcv].end - vm_physmem[lcv].start);

	/*
	 * we now know we have (PAGE_SIZE * freepages) bytes of memory we can
	 * use.   for each page of memory we use we need a vm_page structure.
	 * thus, the total number of pages we can use is the total size of
	 * the memory divided by the PAGE_SIZE plus the size of the vm_page
	 * structure.   we add one to freepages as a fudge factor to avoid
	 * truncation errors (since we can only allocate in terms of whole
	 * pages).
	 */

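	/*
	 * example (sketch, hypothetical sizes): with PAGE_SIZE == 4096 and
	 * sizeof(struct vm_page) == 96, freepages == 16384 gives
	 * pagecount == (16385 << 12) / (4096 + 96) == 16010, i.e. the
	 * page array itself consumes roughly 2% of the managed pages.
	 */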
	pagecount = (((paddr_t)freepages + 1) << PAGE_SHIFT) /
	    (PAGE_SIZE + sizeof(struct vm_page));
	pagearray = (vm_page_t)uvm_pageboot_alloc(pagecount *
	    sizeof(struct vm_page));
	memset(pagearray, 0, pagecount * sizeof(struct vm_page));

	/*
	 * init the vm_page structures and put them in the correct place.
	 */

	for (lcv = 0 ; lcv < vm_nphysseg ; lcv++) {
		n = vm_physmem[lcv].end - vm_physmem[lcv].start;
		if (n > pagecount) {
			printf("uvm_page_init: lost %ld page(s) in init\n",
			    (long)(n - pagecount));
			panic("uvm_page_init");  /* XXXCDC: shouldn't happen? */
			/* n = pagecount; */
		}

		/* set up page array pointers */
		vm_physmem[lcv].pgs = pagearray;
		pagearray += n;
		pagecount -= n;
		vm_physmem[lcv].lastpg = vm_physmem[lcv].pgs + (n - 1);

		/* init and free vm_pages (we've already zeroed them) */
		paddr = ptoa(vm_physmem[lcv].start);
		for (i = 0 ; i < n ; i++, paddr += PAGE_SIZE) {
			vm_physmem[lcv].pgs[i].phys_addr = paddr;
#ifdef __HAVE_VM_PAGE_MD
			VM_MDPAGE_INIT(&vm_physmem[lcv].pgs[i]);
#endif
			if (atop(paddr) >= vm_physmem[lcv].avail_start &&
			    atop(paddr) <= vm_physmem[lcv].avail_end) {
				uvmexp.npages++;
				/* add page to free pool */
				uvm_pagefree(&vm_physmem[lcv].pgs[i]);
			}
		}
	}

	/*
	 * pass up the values of virtual_space_start and
	 * virtual_space_end (obtained by uvm_pageboot_alloc) to the upper
	 * layers of the VM.
	 */

	*kvm_startp = round_page(virtual_space_start);
	*kvm_endp = trunc_page(virtual_space_end);

	/*
	 * init locks for kernel threads
	 */

	simple_lock_init(&uvm.pagedaemon_lock);
	simple_lock_init(&uvm.aiodoned_lock);

	/*
	 * init reserve thresholds
	 * XXXCDC - values may need adjusting
	 */
	uvmexp.reserve_pagedaemon = 4;
	uvmexp.reserve_kernel = 6;
	uvmexp.anonminpct = 10;
	uvmexp.vnodeminpct = 10;
	uvmexp.vtextminpct = 5;
	uvmexp.anonmin = uvmexp.anonminpct * 256 / 100;
	uvmexp.vnodemin = uvmexp.vnodeminpct * 256 / 100;
	uvmexp.vtextmin = uvmexp.vtextminpct * 256 / 100;

	/*
	 * determine if we should zero pages in the idle loop.
	 */

	uvm.page_idle_zero = vm_page_zero_enable;

	/*
	 * done!
	 */

	uvm.page_init_done = TRUE;
}

/*
 * uvm_setpagesize: set the page size
 *
 * => sets page_shift and page_mask from uvmexp.pagesize.
 */
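/*
 * example: with uvmexp.pagesize == 4096, uvm_setpagesize() computes
 * uvmexp.pagemask == 0xfff and uvmexp.pageshift == 12.
 */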

void
uvm_setpagesize()
{
	if (uvmexp.pagesize == 0)
		uvmexp.pagesize = DEFAULT_PAGE_SIZE;
	uvmexp.pagemask = uvmexp.pagesize - 1;
	if ((uvmexp.pagemask & uvmexp.pagesize) != 0)
		panic("uvm_setpagesize: page size not a power of two");
	for (uvmexp.pageshift = 0; ; uvmexp.pageshift++)
		if ((1 << uvmexp.pageshift) == uvmexp.pagesize)
			break;
}

/*
 * uvm_pageboot_alloc: steal memory from physmem for bootstrapping
 */

vaddr_t
uvm_pageboot_alloc(size)
	vsize_t size;
{
#if defined(PMAP_STEAL_MEMORY)
	vaddr_t addr;

	/*
	 * defer bootstrap allocation to MD code (it may want to allocate
	 * from a direct-mapped segment).  pmap_steal_memory should round
	 * off virtual_space_start/virtual_space_end.
	 */

	addr = pmap_steal_memory(size, &virtual_space_start,
	    &virtual_space_end);

	return(addr);

#else /* !PMAP_STEAL_MEMORY */

	static boolean_t initialized = FALSE;
	vaddr_t addr, vaddr;
	paddr_t paddr;

	/* round to page size */
	size = round_page(size);

	/*
	 * on first call to this function, initialize ourselves.
	 */
	if (initialized == FALSE) {
		pmap_virtual_space(&virtual_space_start, &virtual_space_end);

		/* round it the way we like it */
		virtual_space_start = round_page(virtual_space_start);
		virtual_space_end = trunc_page(virtual_space_end);

		initialized = TRUE;
	}

	/*
	 * allocate virtual memory for this request
	 */
	if (virtual_space_start == virtual_space_end ||
	    (virtual_space_end - virtual_space_start) < size)
		panic("uvm_pageboot_alloc: out of virtual space");

	addr = virtual_space_start;

#ifdef PMAP_GROWKERNEL
	/*
	 * If the kernel pmap can't map the requested space,
	 * then allocate more resources for it.
	 */
	if (uvm_maxkaddr < (addr + size)) {
		uvm_maxkaddr = pmap_growkernel(addr + size);
		if (uvm_maxkaddr < (addr + size))
			panic("uvm_pageboot_alloc: pmap_growkernel() failed");
	}
#endif

	virtual_space_start += size;

	/*
	 * allocate and mapin physical pages to back new virtual pages
	 */

	for (vaddr = round_page(addr) ; vaddr < addr + size ;
	    vaddr += PAGE_SIZE) {

		if (!uvm_page_physget(&paddr))
			panic("uvm_pageboot_alloc: out of memory");

		/*
		 * Note this memory is no longer managed, so using
		 * pmap_kenter is safe.
		 */
		pmap_kenter_pa(vaddr, paddr, VM_PROT_READ|VM_PROT_WRITE);
	}
	pmap_update(pmap_kernel());
	return(addr);
#endif	/* PMAP_STEAL_MEMORY */
}

#if !defined(PMAP_STEAL_MEMORY)
/*
 * uvm_page_physget: "steal" one page from the vm_physmem structure.
 *
 * => attempt to allocate it off the end of a segment in which the "avail"
 *    values match the start/end values.   if we can't do that, then we
 *    will advance both values (making them equal, and removing some
 *    vm_page structures from the non-avail area).
 * => return false if out of memory.
 */

/* subroutine: try to allocate from memory chunks on the specified freelist */
static boolean_t uvm_page_physget_freelist(paddr_t *, int);

static boolean_t
uvm_page_physget_freelist(paddrp, freelist)
	paddr_t *paddrp;
	int freelist;
{
	int lcv, x;
	UVMHIST_FUNC("uvm_page_physget_freelist"); UVMHIST_CALLED(pghist);

	/* pass 1: try allocating from a matching end */
#if (VM_PHYSSEG_STRAT == VM_PSTRAT_BIGFIRST) || \
	(VM_PHYSSEG_STRAT == VM_PSTRAT_BSEARCH)
	for (lcv = vm_nphysseg - 1 ; lcv >= 0 ; lcv--)
#else
	for (lcv = 0 ; lcv < vm_nphysseg ; lcv++)
#endif
	{

		if (uvm.page_init_done == TRUE)
			panic("uvm_page_physget: called _after_ bootstrap");

		if (vm_physmem[lcv].free_list != freelist)
			continue;

		/* try from front */
		if (vm_physmem[lcv].avail_start == vm_physmem[lcv].start &&
		    vm_physmem[lcv].avail_start < vm_physmem[lcv].avail_end) {
			*paddrp = ptoa(vm_physmem[lcv].avail_start);
			vm_physmem[lcv].avail_start++;
			vm_physmem[lcv].start++;
			/* nothing left?   nuke it */
			if (vm_physmem[lcv].avail_start ==
			    vm_physmem[lcv].end) {
				if (vm_nphysseg == 1)
				    panic("uvm_page_physget: out of memory!");
				vm_nphysseg--;
				for (x = lcv ; x < vm_nphysseg ; x++)
					/* structure copy */
					vm_physmem[x] = vm_physmem[x+1];
			}
			return (TRUE);
		}

		/* try from rear */
		if (vm_physmem[lcv].avail_end == vm_physmem[lcv].end &&
		    vm_physmem[lcv].avail_start < vm_physmem[lcv].avail_end) {
			*paddrp = ptoa(vm_physmem[lcv].avail_end - 1);
			vm_physmem[lcv].avail_end--;
			vm_physmem[lcv].end--;
			/* nothing left?   nuke it */
			if (vm_physmem[lcv].avail_end ==
			    vm_physmem[lcv].start) {
				if (vm_nphysseg == 1)
				    panic("uvm_page_physget: out of memory!");
				vm_nphysseg--;
				for (x = lcv ; x < vm_nphysseg ; x++)
					/* structure copy */
					vm_physmem[x] = vm_physmem[x+1];
			}
			return (TRUE);
		}
	}

	/* pass2: forget about matching ends, just allocate something */
#if (VM_PHYSSEG_STRAT == VM_PSTRAT_BIGFIRST) || \
	(VM_PHYSSEG_STRAT == VM_PSTRAT_BSEARCH)
	for (lcv = vm_nphysseg - 1 ; lcv >= 0 ; lcv--)
#else
	for (lcv = 0 ; lcv < vm_nphysseg ; lcv++)
#endif
	{

		/* any room in this bank? */
		if (vm_physmem[lcv].avail_start >= vm_physmem[lcv].avail_end)
			continue;  /* nope */

		*paddrp = ptoa(vm_physmem[lcv].avail_start);
		vm_physmem[lcv].avail_start++;
		/* truncate! */
		vm_physmem[lcv].start = vm_physmem[lcv].avail_start;

		/* nothing left?   nuke it */
		if (vm_physmem[lcv].avail_start == vm_physmem[lcv].end) {
			if (vm_nphysseg == 1)
				panic("uvm_page_physget: out of memory!");
			vm_nphysseg--;
			for (x = lcv ; x < vm_nphysseg ; x++)
				/* structure copy */
				vm_physmem[x] = vm_physmem[x+1];
		}
		return (TRUE);
	}

	return (FALSE);        /* whoops! */
}

boolean_t
uvm_page_physget(paddrp)
	paddr_t *paddrp;
{
	int i;
	UVMHIST_FUNC("uvm_page_physget"); UVMHIST_CALLED(pghist);

	/* try in the order of freelist preference */
	for (i = 0; i < VM_NFREELIST; i++)
		if (uvm_page_physget_freelist(paddrp, i) == TRUE)
			return (TRUE);
	return (FALSE);
}
#endif /* PMAP_STEAL_MEMORY */

/*
 * uvm_page_physload: load physical memory into VM system
 *
 * => all args are PFs
 * => all pages in start/end get vm_page structures
 * => areas marked by avail_start/avail_end get added to the free page pool
 * => we are limited to VM_PHYSSEG_MAX physical memory segments
 */
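
/*
 * example (sketch, hypothetical addresses): MD bootstrap code
 * registering 128MB of RAM at 1MB, all of it available:
 *
 *	uvm_page_physload(atop(0x00100000), atop(0x08100000),
 *	    atop(0x00100000), atop(0x08100000), VM_FREELIST_DEFAULT);
 */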

void
uvm_page_physload(start, end, avail_start, avail_end, free_list)
	paddr_t start, end, avail_start, avail_end;
	int free_list;
{
	int preload, lcv;
	psize_t npages;
	struct vm_page *pgs;
	struct vm_physseg *ps;

	if (uvmexp.pagesize == 0)
		panic("uvm_page_physload: page size not set!");

	if (free_list >= VM_NFREELIST || free_list < VM_FREELIST_DEFAULT)
		panic("uvm_page_physload: bad free list %d", free_list);

	if (start >= end)
		panic("uvm_page_physload: start >= end");

	/*
	 * do we have room?
	 */
	if (vm_nphysseg == VM_PHYSSEG_MAX) {
		printf("uvm_page_physload: unable to load physical memory "
		    "segment\n");
		printf("\t%d segments allocated, ignoring 0x%llx -> 0x%llx\n",
		    VM_PHYSSEG_MAX, (long long)start, (long long)end);
		printf("\tincrease VM_PHYSSEG_MAX\n");
		return;
	}

	/*
	 * check to see if this is a "preload" (i.e. uvm_mem_init hasn't been
	 * called yet, so malloc is not available).
	 */
	for (lcv = 0 ; lcv < vm_nphysseg ; lcv++) {
		if (vm_physmem[lcv].pgs)
			break;
	}
	preload = (lcv == vm_nphysseg);

	/*
	 * if VM is already running, attempt to malloc() vm_page structures
	 */
	if (!preload) {
#if defined(VM_PHYSSEG_NOADD)
		panic("uvm_page_physload: tried to add RAM after vm_mem_init");
#else
		/* XXXCDC: need some sort of lockout for this case */
		paddr_t paddr;
		npages = end - start;  /* # of pages */
		pgs = (struct vm_page *)uvm_km_alloc(kernel_map,
		    sizeof(struct vm_page) * npages);
		if (pgs == NULL) {
			printf("uvm_page_physload: can not malloc vm_page "
			    "structs for segment\n");
			printf("\tignoring 0x%lx -> 0x%lx\n", start, end);
			return;
		}
		/* zero data, init phys_addr and free_list, and free pages */
		memset(pgs, 0, sizeof(struct vm_page) * npages);
		for (lcv = 0, paddr = ptoa(start) ;
				 lcv < npages ; lcv++, paddr += PAGE_SIZE) {
			pgs[lcv].phys_addr = paddr;
			pgs[lcv].free_list = free_list;
			if (atop(paddr) >= avail_start &&
			    atop(paddr) <= avail_end)
				uvm_pagefree(&pgs[lcv]);
		}
		/* XXXCDC: incomplete: need to update uvmexp.free, what else? */
		/* XXXCDC: need hook to tell pmap to rebuild pv_list, etc... */
#endif
	} else {

		/* gcc complains if these don't get init'd */
		pgs = NULL;
		npages = 0;

	}

	/*
	 * now insert us in the proper place in vm_physmem[]
	 */

#if (VM_PHYSSEG_STRAT == VM_PSTRAT_RANDOM)

	/* random: put it at the end (easy!) */
	ps = &vm_physmem[vm_nphysseg];

#elif (VM_PHYSSEG_STRAT == VM_PSTRAT_BSEARCH)

	{
		int x;
		/* sort by address for binary search */
		for (lcv = 0 ; lcv < vm_nphysseg ; lcv++)
			if (start < vm_physmem[lcv].start)
				break;
		ps = &vm_physmem[lcv];
		/* move back other entries, if necessary ... */
		for (x = vm_nphysseg ; x > lcv ; x--)
			/* structure copy */
			vm_physmem[x] = vm_physmem[x - 1];
	}

#elif (VM_PHYSSEG_STRAT == VM_PSTRAT_BIGFIRST)

	{
		int x;
		/* sort by largest segment first */
		for (lcv = 0 ; lcv < vm_nphysseg ; lcv++)
			if ((end - start) >
			    (vm_physmem[lcv].end - vm_physmem[lcv].start))
				break;
		ps = &vm_physmem[lcv];
		/* move back other entries, if necessary ... */
		for (x = vm_nphysseg ; x > lcv ; x--)
			/* structure copy */
			vm_physmem[x] = vm_physmem[x - 1];
	}

#else

	panic("uvm_page_physload: unknown physseg strategy selected!");

#endif

	ps->start = start;
	ps->end = end;
	ps->avail_start = avail_start;
	ps->avail_end = avail_end;
	if (preload) {
		ps->pgs = NULL;
	} else {
		ps->pgs = pgs;
		ps->lastpg = pgs + npages - 1;
	}
	ps->free_list = free_list;
	vm_nphysseg++;

	/*
	 * done!
	 */

	if (!preload)
		uvm_page_rehash();

	return;
}

/*
 * uvm_page_rehash: reallocate hash table based on number of free pages.
 */
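
/*
 * example: with 24000 free pages the loop below settles on the next
 * power of two, bucketcount == 32768, so the new table size is
 * round_page(32768 * sizeof(struct pglist)).
 */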

void
uvm_page_rehash()
{
	int freepages, lcv, bucketcount, s, oldcount;
	struct pglist *newbuckets, *oldbuckets;
	struct vm_page *pg;
	size_t newsize, oldsize;

	/*
	 * compute number of pages that can go in the free pool
	 */

	freepages = 0;
	for (lcv = 0 ; lcv < vm_nphysseg ; lcv++)
		freepages +=
		    (vm_physmem[lcv].avail_end - vm_physmem[lcv].avail_start);

	/*
	 * compute number of buckets needed for this number of pages
	 */

	bucketcount = 1;
	while (bucketcount < freepages)
		bucketcount = bucketcount * 2;

	/*
	 * compute the size of the current table and new table.
	 */

	oldbuckets = uvm.page_hash;
	oldcount = uvm.page_nhash;
	oldsize = round_page(sizeof(struct pglist) * oldcount);
	newsize = round_page(sizeof(struct pglist) * bucketcount);

	/*
	 * allocate the new buckets
	 */

	newbuckets = (struct pglist *) uvm_km_alloc(kernel_map, newsize);
	if (newbuckets == NULL) {
		printf("uvm_page_rehash: WARNING: could not grow page "
		    "hash table\n");
		return;
	}
	for (lcv = 0 ; lcv < bucketcount ; lcv++)
		TAILQ_INIT(&newbuckets[lcv]);

	/*
	 * now replace the old buckets with the new ones and rehash everything
	 */

	s = splvm();
	simple_lock(&uvm.hashlock);
	uvm.page_hash = newbuckets;
	uvm.page_nhash = bucketcount;
	uvm.page_hashmask = bucketcount - 1;  /* power of 2 */

	/* ... and rehash */
	for (lcv = 0 ; lcv < oldcount ; lcv++) {
		while ((pg = TAILQ_FIRST(&oldbuckets[lcv])) != NULL) {
			TAILQ_REMOVE(&oldbuckets[lcv], pg, hashq);
			TAILQ_INSERT_TAIL(
			  &uvm.page_hash[uvm_pagehash(pg->uobject, pg->offset)],
			  pg, hashq);
		}
	}
	simple_unlock(&uvm.hashlock);
	splx(s);

	/*
	 * free old bucket array if it is not the boot-time table
	 */

	if (oldbuckets != &uvm_bootbucket)
		uvm_km_free(kernel_map, (vaddr_t) oldbuckets, oldsize);

	/*
	 * done
	 */
	return;
}


#ifdef DDB /* XXXCDC: TMP TMP TMP DEBUG DEBUG DEBUG */

void uvm_page_physdump(void); /* SHUT UP GCC */

/* call from DDB */
void
uvm_page_physdump()
{
	int lcv;

	printf("physdump: physical memory config [segs=%d of %d]:\n",
	    vm_nphysseg, VM_PHYSSEG_MAX);
	for (lcv = 0 ; lcv < vm_nphysseg ; lcv++)
		printf("0x%llx->0x%llx [0x%llx->0x%llx]\n",
		    (long long)vm_physmem[lcv].start,
		    (long long)vm_physmem[lcv].end,
		    (long long)vm_physmem[lcv].avail_start,
		    (long long)vm_physmem[lcv].avail_end);
	printf("STRATEGY = ");
	switch (VM_PHYSSEG_STRAT) {
	case VM_PSTRAT_RANDOM: printf("RANDOM\n"); break;
	case VM_PSTRAT_BSEARCH: printf("BSEARCH\n"); break;
	case VM_PSTRAT_BIGFIRST: printf("BIGFIRST\n"); break;
	default: printf("<<UNKNOWN>>!!!!\n");
	}
	printf("number of buckets = %d\n", uvm.page_nhash);
}
#endif

void
uvm_shutdown(void)
{
}

/*
 * uvm_pagealloc_strat: allocate vm_page from a particular free list.
 *
 * => return null if no pages free
 * => wake up pagedaemon if number of free pages drops below low water mark
 * => if obj != NULL, obj must be locked (to put in hash)
 * => if anon != NULL, anon must be locked (to put in anon)
 * => only one of obj or anon can be non-null
 * => caller must activate/deactivate page if it is not wired.
 * => free_list is ignored if strat == UVM_PGA_STRAT_NORMAL.
 * => policy decision: it is more important to pull a page off of the
 *	appropriate priority free list than it is to get a zero'd or
 *	unknown contents page.  This is because we live with the
 *	consequences of a bad free list decision for the entire
 *	lifetime of the page, e.g. if the page comes from memory that
 *	is slower to access.
 */
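
/*
 * example (sketch): a typical zero-fill allocation of an object page;
 * "uobj" and "off" are hypothetical:
 *
 *	pg = uvm_pagealloc_strat(uobj, off, NULL, UVM_PGA_ZERO,
 *	    UVM_PGA_STRAT_NORMAL, 0);
 */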

struct vm_page *
uvm_pagealloc_strat(obj, off, anon, flags, strat, free_list)
	struct uvm_object *obj;
	voff_t off;
	int flags;
	struct vm_anon *anon;
	int strat, free_list;
{
	int lcv, try1, try2, zeroit = 0;
	struct vm_page *pg;
	struct pglist *freeq;
	struct pgfreelist *pgfl;
	boolean_t use_reserve;
	UVMHIST_FUNC("uvm_pagealloc_strat"); UVMHIST_CALLED(pghist);

	KASSERT(obj == NULL || anon == NULL);
	KASSERT(off == trunc_page(off));

	uvm_lock_fpageq();

	/*
	 * check to see if we need to generate some free pages by waking
	 * the pagedaemon.
	 */

#ifdef UBC
	if (uvmexp.free + uvmexp.paging < uvmexp.freemin ||
	    (uvmexp.free + uvmexp.paging < uvmexp.freetarg &&
	     uvmexp.inactive < uvmexp.inactarg)) {
		wakeup(&uvm.pagedaemon);
	}
#else
	if (uvmexp.free < uvmexp.freemin || (uvmexp.free < uvmexp.freetarg &&
	    uvmexp.inactive < uvmexp.inactarg))
		wakeup(&uvm.pagedaemon);
#endif

	/*
	 * fail if any of these conditions is true:
	 * [1]  there really are no free pages, or
	 * [2]  only kernel "reserved" pages remain and
	 *        the page isn't being allocated to a kernel object.
	 * [3]  only pagedaemon "reserved" pages remain and
	 *        the requestor isn't the pagedaemon.
	 */

	use_reserve = (flags & UVM_PGA_USERESERVE) ||
		(obj && UVM_OBJ_IS_KERN_OBJECT(obj));
	if ((uvmexp.free <= uvmexp.reserve_kernel && !use_reserve) ||
	    (uvmexp.free <= uvmexp.reserve_pagedaemon &&
	     !(use_reserve && (curproc == uvm.pagedaemon_proc ||
				curproc == syncerproc))))
		goto fail;

#if PGFL_NQUEUES != 2
#error uvm_pagealloc_strat needs to be updated
#endif

	/*
	 * If we want a zero'd page, try the ZEROS queue first, otherwise
	 * we try the UNKNOWN queue first.
	 */
	if (flags & UVM_PGA_ZERO) {
		try1 = PGFL_ZEROS;
		try2 = PGFL_UNKNOWN;
	} else {
		try1 = PGFL_UNKNOWN;
		try2 = PGFL_ZEROS;
	}

	UVMHIST_LOG(pghist, "obj=%p off=%lx anon=%p flags=%lx",
	    obj, (u_long)off, anon, flags);
	UVMHIST_LOG(pghist, "strat=%ld free_list=%ld", strat, free_list, 0, 0);
 again:
	switch (strat) {
	case UVM_PGA_STRAT_NORMAL:
		/* Check all freelists in descending priority order. */
		for (lcv = 0; lcv < VM_NFREELIST; lcv++) {
			pgfl = &uvm.page_free[lcv];
			if ((pg = TAILQ_FIRST((freeq =
			      &pgfl->pgfl_queues[try1]))) != NULL ||
			    (pg = TAILQ_FIRST((freeq =
			      &pgfl->pgfl_queues[try2]))) != NULL)
				goto gotit;
		}

		/* No pages free! */
		goto fail;

	case UVM_PGA_STRAT_ONLY:
	case UVM_PGA_STRAT_FALLBACK:
		/* Attempt to allocate from the specified free list. */
		KASSERT(free_list >= 0 && free_list < VM_NFREELIST);
		pgfl = &uvm.page_free[free_list];
		if ((pg = TAILQ_FIRST((freeq =
		      &pgfl->pgfl_queues[try1]))) != NULL ||
		    (pg = TAILQ_FIRST((freeq =
		      &pgfl->pgfl_queues[try2]))) != NULL)
			goto gotit;

		/* Fall back, if possible. */
		if (strat == UVM_PGA_STRAT_FALLBACK) {
			strat = UVM_PGA_STRAT_NORMAL;
			goto again;
		}

		/* No pages free! */
		goto fail;

	default:
		panic("uvm_pagealloc_strat: bad strat %d", strat);
		/* NOTREACHED */
	}

 gotit:
	TAILQ_REMOVE(freeq, pg, pageq);
	uvmexp.free--;

	/* update zero'd page count */
	if (pg->pg_flags & PG_ZERO)
		uvmexp.zeropages--;

	/*
	 * update allocation statistics and remember if we have to
	 * zero the page
	 */
	if (flags & UVM_PGA_ZERO) {
		if (pg->pg_flags & PG_ZERO) {
			uvmexp.pga_zerohit++;
			zeroit = 0;
		} else {
			uvmexp.pga_zeromiss++;
			zeroit = 1;
		}
	}

	uvm_unlock_fpageq();		/* unlock free page queue */

	pg->offset = off;
	pg->uobject = obj;
	pg->uanon = anon;
	pg->pg_flags = PG_BUSY|PG_CLEAN|PG_FAKE;
	pg->pg_version++;
	if (anon) {
		anon->an_page = pg;
		atomic_setbits_int(&pg->pg_flags, PQ_ANON);
#ifdef UBC
		uvm_pgcnt_anon++;
#endif
	} else {
		if (obj)
			uvm_pageinsert(pg);
	}
#if defined(UVM_PAGE_TRKOWN)
	pg->owner_tag = NULL;
#endif
	UVM_PAGE_OWN(pg, "new alloc");

	if (flags & UVM_PGA_ZERO) {
		/*
		 * A zero'd page is not clean.  If we got a page not already
		 * zero'd, then we have to zero it ourselves.
		 */
		atomic_clearbits_int(&pg->pg_flags, PG_CLEAN);
		if (zeroit)
			pmap_zero_page(pg);
	}

	UVMHIST_LOG(pghist, "allocated pg %p/%lx", pg,
	    (u_long)VM_PAGE_TO_PHYS(pg), 0, 0);
	return(pg);

 fail:
	uvm_unlock_fpageq();
	UVMHIST_LOG(pghist, "failed!", 0, 0, 0, 0);
	return (NULL);
}

/*
 * uvm_pagerealloc: reallocate a page from one object to another
 *
 * => both objects must be locked
 */

void
uvm_pagerealloc(pg, newobj, newoff)
	struct vm_page *pg;
	struct uvm_object *newobj;
	voff_t newoff;
{

	UVMHIST_FUNC("uvm_pagerealloc"); UVMHIST_CALLED(pghist);

	/*
	 * remove it from the old object
	 */

	if (pg->uobject) {
		uvm_pageremove(pg);
	}

	/*
	 * put it in the new object
	 */

	if (newobj) {
		pg->uobject = newobj;
		pg->offset = newoff;
		pg->pg_version++;
		uvm_pageinsert(pg);
	}
}


/*
 * uvm_pagefree: free page
 *
 * => erase page's identity (i.e. remove from hash/object)
 * => put page on free list
 * => caller must lock owning object (either anon or uvm_object)
 * => caller must lock page queues
 * => assumes all valid mappings of pg are gone
 */
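
/*
 * example (sketch): releasing an object page for good; the caller
 * removes any remaining mappings first ("pg" hypothetical):
 *
 *	pmap_page_protect(pg, VM_PROT_NONE);
 *	uvm_pagefree(pg);
 */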

void
uvm_pagefree(struct vm_page *pg)
{
	int saved_loan_count = pg->loan_count;
	UVMHIST_FUNC("uvm_pagefree"); UVMHIST_CALLED(pghist);

#ifdef DEBUG
	if (pg->uobject == (void *)0xdeadbeef &&
	    pg->uanon == (void *)0xdeadbeef) {
		panic("uvm_pagefree: freeing free page %p", pg);
	}
#endif

	UVMHIST_LOG(pghist, "freeing pg %p/%lx", pg,
	    (u_long)VM_PAGE_TO_PHYS(pg), 0, 0);

	/*
	 * if the page was an object page (and thus "TABLED"), remove it
	 * from the object.
	 */

	if (pg->pg_flags & PG_TABLED) {

		/*
		 * if the object page is on loan we are going to drop ownership.
		 * it is possible that an anon will take over as owner for this
		 * page later on.   the anon will want a !PG_CLEAN page so that
		 * it knows it needs to allocate swap if it wants to page the
		 * page out.
		 */

		/* in case an anon takes over */
		if (saved_loan_count)
			atomic_clearbits_int(&pg->pg_flags, PG_CLEAN);
		uvm_pageremove(pg);

		/*
		 * if our page was on loan, then we just lost control over it
		 * (in fact, if it was loaned to an anon, the anon may have
		 * already taken over ownership of the page by now and thus
		 * changed the loan_count [e.g. in uvmfault_anonget()]) we just
		 * return (when the last loan is dropped, then the page can be
		 * freed by whatever was holding the last loan).
		 */

		if (saved_loan_count)
			return;
	} else if (saved_loan_count && pg->uanon) {
		/*
		 * if our page is owned by an anon and is loaned out to the
		 * kernel then we just want to drop ownership and return.
		 * the kernel must free the page when all its loans clear ...
		 * note that the kernel can't change the loan status of our
		 * page as long as we are holding PQ lock.
		 */
		atomic_clearbits_int(&pg->pg_flags, PQ_ANON);
		pg->uanon->an_page = NULL;
		pg->uanon = NULL;
		return;
	}
	KASSERT(saved_loan_count == 0);

	/*
	 * now remove the page from the queues
	 */

	if (pg->pg_flags & PQ_ACTIVE) {
		TAILQ_REMOVE(&uvm.page_active, pg, pageq);
		atomic_clearbits_int(&pg->pg_flags, PQ_ACTIVE);
		uvmexp.active--;
	}
	if (pg->pg_flags & PQ_INACTIVE) {
		if (pg->pg_flags & PQ_SWAPBACKED)
			TAILQ_REMOVE(&uvm.page_inactive_swp, pg, pageq);
		else
			TAILQ_REMOVE(&uvm.page_inactive_obj, pg, pageq);
		atomic_clearbits_int(&pg->pg_flags, PQ_INACTIVE);
		uvmexp.inactive--;
	}

	/*
	 * if the page was wired, unwire it now.
	 */

	if (pg->wire_count) {
		pg->wire_count = 0;
		uvmexp.wired--;
	}
	if (pg->uanon) {
		pg->uanon->an_page = NULL;
#ifdef UBC
		uvm_pgcnt_anon--;
#endif
	}

	/*
	 * and put on free queue
	 */

	atomic_clearbits_int(&pg->pg_flags, PG_ZERO);

	uvm_lock_fpageq();
	TAILQ_INSERT_TAIL(&uvm.page_free[
	    uvm_page_lookup_freelist(pg)].pgfl_queues[PGFL_UNKNOWN], pg, pageq);
	atomic_clearbits_int(&pg->pg_flags, PQ_MASK);
	atomic_setbits_int(&pg->pg_flags, PQ_FREE);
#ifdef DEBUG
	pg->uobject = (void *)0xdeadbeef;
	pg->offset = 0xdeadbeef;
	pg->uanon = (void *)0xdeadbeef;
#endif
	uvmexp.free++;

	if (uvmexp.zeropages < UVM_PAGEZERO_TARGET)
		uvm.page_idle_zero = vm_page_zero_enable;

	uvm_unlock_fpageq();
}

/*
 * uvm_page_unbusy: unbusy an array of pages.
 *
 * => pages must either all belong to the same object, or all belong to anons.
 * => if pages are object-owned, object must be locked.
 * => if pages are anon-owned, anons must be unlocked and have 0 refcount.
 */

void
uvm_page_unbusy(pgs, npgs)
	struct vm_page **pgs;
	int npgs;
{
	struct vm_page *pg;
	struct uvm_object *uobj;
	int i;
	UVMHIST_FUNC("uvm_page_unbusy"); UVMHIST_CALLED(pdhist);

	for (i = 0; i < npgs; i++) {
		pg = pgs[i];

		if (pg == NULL || pg == PGO_DONTCARE) {
			continue;
		}
		if (pg->pg_flags & PG_WANTED) {
			wakeup(pg);
		}
		if (pg->pg_flags & PG_RELEASED) {
			UVMHIST_LOG(pdhist, "releasing pg %p", pg,0,0,0);
			uobj = pg->uobject;
			if (uobj != NULL) {
				uobj->pgops->pgo_releasepg(pg, NULL);
			} else {
				atomic_clearbits_int(&pg->pg_flags, PG_BUSY);
				UVM_PAGE_OWN(pg, NULL);
				uvm_anfree(pg->uanon);
			}
		} else {
			UVMHIST_LOG(pdhist, "unbusying pg %p", pg,0,0,0);
			atomic_clearbits_int(&pg->pg_flags, PG_WANTED|PG_BUSY);
			UVM_PAGE_OWN(pg, NULL);
		}
	}
}

#if defined(UVM_PAGE_TRKOWN)
/*
 * uvm_page_own: set or release page ownership
 *
 * => this is a debugging function that keeps track of who sets PG_BUSY
 *	and where they do it.   it can be used to track down problems
 *	such as a process setting "PG_BUSY" and never releasing it.
 * => page's object [if any] must be locked
 * => if "tag" is NULL then we are releasing page ownership
 */
void
uvm_page_own(pg, tag)
	struct vm_page *pg;
	char *tag;
{
	/* gain ownership? */
	if (tag) {
		if (pg->owner_tag) {
			printf("uvm_page_own: page %p already owned "
			    "by proc %d [%s]\n", pg,
			    pg->owner, pg->owner_tag);
			panic("uvm_page_own");
		}
		pg->owner = (curproc) ? curproc->p_pid : (pid_t) -1;
		pg->owner_tag = tag;
		return;
	}

	/* drop ownership */
	if (pg->owner_tag == NULL) {
		printf("uvm_page_own: dropping ownership of a non-owned "
		    "page (%p)\n", pg);
		panic("uvm_page_own");
	}
	pg->owner_tag = NULL;
	return;
}
#endif

/*
 * uvm_pageidlezero: zero free pages while the system is idle.
 *
 * => we do at least one iteration per call, if we are below the target.
 * => we loop until we either reach the target or the scheduler indicates
 *	that there is a process ready to run.
 */
void
uvm_pageidlezero()
{
	struct vm_page *pg;
	struct pgfreelist *pgfl;
	int free_list;
	UVMHIST_FUNC("uvm_pageidlezero"); UVMHIST_CALLED(pghist);

	do {
		uvm_lock_fpageq();

		if (uvmexp.zeropages >= UVM_PAGEZERO_TARGET) {
			uvm.page_idle_zero = FALSE;
			uvm_unlock_fpageq();
			return;
		}

		for (free_list = 0; free_list < VM_NFREELIST; free_list++) {
			pgfl = &uvm.page_free[free_list];
			if ((pg = TAILQ_FIRST(&pgfl->pgfl_queues[
			    PGFL_UNKNOWN])) != NULL)
				break;
		}

		if (pg == NULL) {
			/*
			 * No non-zero'd pages; don't bother trying again
			 * until we know we have non-zero'd pages free.
			 */
			uvm.page_idle_zero = FALSE;
			uvm_unlock_fpageq();
			return;
		}

		TAILQ_REMOVE(&pgfl->pgfl_queues[PGFL_UNKNOWN], pg, pageq);
		uvmexp.free--;
		uvm_unlock_fpageq();

#ifdef PMAP_PAGEIDLEZERO
		if (PMAP_PAGEIDLEZERO(pg) == FALSE) {
			/*
			 * The machine-dependent code detected some
			 * reason for us to abort zeroing pages,
			 * probably because there is a process now
			 * ready to run.
			 */
			uvm_lock_fpageq();
			TAILQ_INSERT_HEAD(&pgfl->pgfl_queues[PGFL_UNKNOWN],
			    pg, pageq);
			uvmexp.free++;
			uvmexp.zeroaborts++;
			uvm_unlock_fpageq();
			return;
		}
#else
		/*
		 * XXX This will toast the cache unless the pmap_zero_page()
		 * XXX implementation does uncached access.
		 */
		pmap_zero_page(pg);
#endif
		atomic_setbits_int(&pg->pg_flags, PG_ZERO);

		uvm_lock_fpageq();
		TAILQ_INSERT_HEAD(&pgfl->pgfl_queues[PGFL_ZEROS], pg, pageq);
		uvmexp.free++;
		uvmexp.zeropages++;
		uvm_unlock_fpageq();
	} while (sched_is_idle());
}
1409