1/*	$OpenBSD: uvm_page.c,v 1.121 2013/03/12 21:08:04 deraadt Exp $	*/
2/*	$NetBSD: uvm_page.c,v 1.44 2000/11/27 08:40:04 chs Exp $	*/
3
4/*
5 * Copyright (c) 1997 Charles D. Cranor and Washington University.
6 * Copyright (c) 1991, 1993, The Regents of the University of California.
7 *
8 * All rights reserved.
9 *
10 * This code is derived from software contributed to Berkeley by
11 * The Mach Operating System project at Carnegie-Mellon University.
12 *
13 * Redistribution and use in source and binary forms, with or without
14 * modification, are permitted provided that the following conditions
15 * are met:
16 * 1. Redistributions of source code must retain the above copyright
17 *    notice, this list of conditions and the following disclaimer.
18 * 2. Redistributions in binary form must reproduce the above copyright
19 *    notice, this list of conditions and the following disclaimer in the
20 *    documentation and/or other materials provided with the distribution.
21 * 3. All advertising materials mentioning features or use of this software
22 *    must display the following acknowledgement:
23 *	This product includes software developed by Charles D. Cranor,
24 *      Washington University, the University of California, Berkeley and
25 *      its contributors.
26 * 4. Neither the name of the University nor the names of its contributors
27 *    may be used to endorse or promote products derived from this software
28 *    without specific prior written permission.
29 *
30 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
31 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
32 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
33 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
34 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
35 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
36 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
37 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
38 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
39 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
40 * SUCH DAMAGE.
41 *
42 *	@(#)vm_page.c   8.3 (Berkeley) 3/21/94
43 * from: Id: uvm_page.c,v 1.1.2.18 1998/02/06 05:24:42 chs Exp
44 *
45 *
46 * Copyright (c) 1987, 1990 Carnegie-Mellon University.
47 * All rights reserved.
48 *
49 * Permission to use, copy, modify and distribute this software and
50 * its documentation is hereby granted, provided that both the copyright
51 * notice and this permission notice appear in all copies of the
52 * software, derivative works or modified versions, and any portions
53 * thereof, and that both notices appear in supporting documentation.
54 *
55 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
56 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND
57 * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
58 *
59 * Carnegie Mellon requests users of this software to return to
60 *
61 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
62 *  School of Computer Science
63 *  Carnegie Mellon University
64 *  Pittsburgh PA 15213-3890
65 *
66 * any improvements or extensions that they make and grant Carnegie the
67 * rights to redistribute these changes.
68 */
69
70/*
71 * uvm_page.c: page ops.
72 */
73
74#include <sys/param.h>
75#include <sys/systm.h>
76#include <sys/sched.h>
77#include <sys/kernel.h>
78#include <sys/vnode.h>
79#include <sys/mount.h>
80#include <sys/proc.h>
81
82#include <uvm/uvm.h>
83
84/*
85 * for object trees
86 */
87RB_GENERATE(uvm_objtree, vm_page, objt, uvm_pagecmp);
88
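/*
 * uvm_pagecmp: three-way comparison used to order pages in an object's
 * RB tree by their offset within the object.  The expression below yields
 * -1 when a sorts before b, 1 when it sorts after, and 0 when the offsets
 * are equal, which is the contract RB_INSERT and RB_FIND expect.
 */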
89int
90uvm_pagecmp(struct vm_page *a, struct vm_page *b)
91{
92	return (a->offset < b->offset ? -1 : a->offset > b->offset);
93}
94
95/*
96 * global vars... XXXCDC: move to uvm. structure.
97 */
98
99/*
100 * physical memory config is stored in vm_physmem.
101 */
102
103struct vm_physseg vm_physmem[VM_PHYSSEG_MAX];	/* XXXCDC: uvm.physmem */
104int vm_nphysseg = 0;				/* XXXCDC: uvm.nphysseg */
105
106/*
107 * Some supported CPUs in a given architecture don't support all
108 * of the things necessary to do idle page zero'ing efficiently.
109 * We therefore provide a way to disable it from machdep code here.
110 */
111
112/*
113 * XXX disabled until we can find a way to do this without causing
114 * problems for either cpu caches or DMA latency.
115 */
116boolean_t vm_page_zero_enable = FALSE;
117
118/*
119 * local variables
120 */
121
122/*
123 * these variables record the kernel virtual address range set up by
124 * uvm_pageboot_alloc() (via pmap_virtual_space() or pmap_steal_memory());
125 * uvm_page_init() passes what is left of it up to the caller.
126 */
127
128static vaddr_t      virtual_space_start;
129static vaddr_t      virtual_space_end;
130
131/*
132 * local prototypes
133 */
134
135static void uvm_pageinsert(struct vm_page *);
136static void uvm_pageremove(struct vm_page *);
137
138/*
139 * inline functions
140 */
141
142/*
143 * uvm_pageinsert: insert a page in the object
144 *
145 * => caller must lock object
146 * => caller must lock page queues XXX questionable
147 * => caller should have already set pg's object and offset pointers
148 *    and bumped the version counter
149 */
150
151__inline static void
152uvm_pageinsert(struct vm_page *pg)
153{
154	struct vm_page	*dupe;
155
156	KASSERT((pg->pg_flags & PG_TABLED) == 0);
157	dupe = RB_INSERT(uvm_objtree, &pg->uobject->memt, pg);
158	/* not allowed to insert over another page */
159	KASSERT(dupe == NULL);
160	atomic_setbits_int(&pg->pg_flags, PG_TABLED);
161	pg->uobject->uo_npages++;
162}
163
164/*
165 * uvm_pageremove: remove page from object
166 *
167 * => caller must lock object
168 * => caller must lock page queues
169 */
170
171static __inline void
172uvm_pageremove(struct vm_page *pg)
173{
174
175	KASSERT(pg->pg_flags & PG_TABLED);
176	RB_REMOVE(uvm_objtree, &pg->uobject->memt, pg);
177
178	atomic_clearbits_int(&pg->pg_flags, PG_TABLED);
179	pg->uobject->uo_npages--;
180	pg->uobject = NULL;
181	pg->pg_version++;
182}
183
184/*
185 * uvm_page_init: init the page system.   called from uvm_init().
186 *
187 * => we return the range of kernel virtual memory in kvm_startp/kvm_endp
188 */
189
190void
191uvm_page_init(vaddr_t *kvm_startp, vaddr_t *kvm_endp)
192{
193	vsize_t freepages, pagecount, n;
194	vm_page_t pagearray;
195	int lcv, i;
196	paddr_t paddr;
197
198	/*
199	 * init the page queues and page queue locks
200	 */
201
202	TAILQ_INIT(&uvm.page_active);
203	TAILQ_INIT(&uvm.page_inactive_swp);
204	TAILQ_INIT(&uvm.page_inactive_obj);
205	simple_lock_init(&uvm.pageqlock);
206	mtx_init(&uvm.fpageqlock, IPL_VM);
207	uvm_pmr_init();
208
209	/*
210	 * allocate vm_page structures.
211	 */
212
213	/*
214	 * sanity check:
215	 * before calling this function the MD code is expected to register
216	 * some free RAM with the uvm_page_physload() function.   our job
217	 * now is to allocate vm_page structures for this memory.
218	 */
219
220	if (vm_nphysseg == 0)
221		panic("uvm_page_bootstrap: no memory pre-allocated");
222
223	/*
224	 * first calculate the number of free pages...
225	 *
226	 * note that we use start/end rather than avail_start/avail_end.
227	 * this allows us to allocate extra vm_page structures in case we
228	 * want to return some memory to the pool after booting.
229	 */
230
231	freepages = 0;
232	for (lcv = 0 ; lcv < vm_nphysseg ; lcv++)
233		freepages += (vm_physmem[lcv].end - vm_physmem[lcv].start);
234
235	/*
236	 * we now know we have (PAGE_SIZE * freepages) bytes of memory we can
237	 * use.   for each page of memory we use we need a vm_page structure.
238	 * thus, the total number of pages we can use is the total size of
239	 * the memory divided by the PAGE_SIZE plus the size of the vm_page
240	 * structure.   we add one to freepages as a fudge factor to avoid
241	 * truncation errors (since we can only allocate in terms of whole
242	 * pages).
243	 */
244
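	/*
	 * Illustrative arithmetic (the structure size below is an assumption,
	 * not taken from this file): with PAGE_SIZE = 4096 and
	 * sizeof(struct vm_page) = 120, one million raw free pages give
	 *	pagecount = (1000001 * 4096) / (4096 + 120) ~= 971537,
	 * i.e. a bit over 97% of the raw pages become usable; the remainder
	 * of the stolen memory holds their vm_page structures.
	 */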
245	pagecount = (((paddr_t)freepages + 1) << PAGE_SHIFT) /
246	    (PAGE_SIZE + sizeof(struct vm_page));
247	pagearray = (vm_page_t)uvm_pageboot_alloc(pagecount *
248	    sizeof(struct vm_page));
249	memset(pagearray, 0, pagecount * sizeof(struct vm_page));
250
251	/*
252	 * init the vm_page structures and put them in the correct place.
253	 */
254
255	for (lcv = 0 ; lcv < vm_nphysseg ; lcv++) {
256		n = vm_physmem[lcv].end - vm_physmem[lcv].start;
257		if (n > pagecount) {
258			panic("uvm_page_init: lost %ld page(s) in init",
259			    (long)(n - pagecount));
260			    /* XXXCDC: shouldn't happen? */
261			/* n = pagecount; */
262		}
263
264		/* set up page array pointers */
265		vm_physmem[lcv].pgs = pagearray;
266		pagearray += n;
267		pagecount -= n;
268		vm_physmem[lcv].lastpg = vm_physmem[lcv].pgs + (n - 1);
269
270		/* init and free vm_pages (we've already zeroed them) */
271		paddr = ptoa(vm_physmem[lcv].start);
272		for (i = 0 ; i < n ; i++, paddr += PAGE_SIZE) {
273			vm_physmem[lcv].pgs[i].phys_addr = paddr;
274#ifdef __HAVE_VM_PAGE_MD
275			VM_MDPAGE_INIT(&vm_physmem[lcv].pgs[i]);
276#endif
277			if (atop(paddr) >= vm_physmem[lcv].avail_start &&
278			    atop(paddr) <= vm_physmem[lcv].avail_end) {
279				uvmexp.npages++;
280			}
281		}
282
283		/*
284		 * Add pages to free pool.
285		 */
286		uvm_pmr_freepages(&vm_physmem[lcv].pgs[
287		    vm_physmem[lcv].avail_start - vm_physmem[lcv].start],
288		    vm_physmem[lcv].avail_end - vm_physmem[lcv].avail_start);
289	}
290
291	/*
292	 * pass up the values of virtual_space_start and
293	 * virtual_space_end (obtained by uvm_pageboot_alloc) to the upper
294	 * layers of the VM.
295	 */
296
297	*kvm_startp = round_page(virtual_space_start);
298	*kvm_endp = trunc_page(virtual_space_end);
299
300	/*
301	 * init locks for kernel threads
302	 */
303	mtx_init(&uvm.aiodoned_lock, IPL_BIO);
304
305	/*
306	 * init reserve thresholds
307	 * XXXCDC - values may need adjusting
308	 */
309	uvmexp.reserve_pagedaemon = 4;
310	uvmexp.reserve_kernel = 6;
311	uvmexp.anonminpct = 10;
312	uvmexp.vnodeminpct = 10;
313	uvmexp.vtextminpct = 5;
314	uvmexp.anonmin = uvmexp.anonminpct * 256 / 100;
315	uvmexp.vnodemin = uvmexp.vnodeminpct * 256 / 100;
316	uvmexp.vtextmin = uvmexp.vtextminpct * 256 / 100;
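	/*
	 * Note: the *minpct values above are plain percentages; the *min
	 * values are the same thresholds rescaled into the out-of-256
	 * fixed-point form used elsewhere in UVM (e.g. by the page daemon),
	 * so 10% becomes 10 * 256 / 100 = 25.
	 */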
317
318  	/*
319	 * determine if we should zero pages in the idle loop.
320	 */
321
322	uvm.page_idle_zero = vm_page_zero_enable;
323
324	/*
325	 * done!
326	 */
327
328	uvm.page_init_done = TRUE;
329}
330
331/*
332 * uvm_setpagesize: set the page size
333 *
334 * => sets page_shift and page_mask from uvmexp.pagesize.
335 */
336
337void
338uvm_setpagesize(void)
339{
340	if (uvmexp.pagesize == 0)
341		uvmexp.pagesize = DEFAULT_PAGE_SIZE;
342	uvmexp.pagemask = uvmexp.pagesize - 1;
343	if ((uvmexp.pagemask & uvmexp.pagesize) != 0)
344		panic("uvm_setpagesize: page size not a power of two");
345	for (uvmexp.pageshift = 0; ; uvmexp.pageshift++)
346		if ((1 << uvmexp.pageshift) == uvmexp.pagesize)
347			break;
348}
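/*
 * For example (illustrative values): with uvmexp.pagesize = 4096 the loop
 * above ends with uvmexp.pageshift = 12 and uvmexp.pagemask = 0xfff, so a
 * physical address splits into a page frame number (pa >> pageshift) and
 * an offset within the page (pa & pagemask).
 */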
349
350/*
351 * uvm_pageboot_alloc: steal memory from physmem for bootstrapping
352 */
353
354vaddr_t
355uvm_pageboot_alloc(vsize_t size)
356{
357#if defined(PMAP_STEAL_MEMORY)
358	vaddr_t addr;
359
360	/*
361	 * defer bootstrap allocation to MD code (it may want to allocate
362	 * from a direct-mapped segment).  pmap_steal_memory should round
363	 * off virtual_space_start/virtual_space_end.
364	 */
365
366	addr = pmap_steal_memory(size, &virtual_space_start,
367	    &virtual_space_end);
368
369	return(addr);
370
371#else /* !PMAP_STEAL_MEMORY */
372
373	static boolean_t initialized = FALSE;
374	vaddr_t addr, vaddr;
375	paddr_t paddr;
376
377	/* round to page size */
378	size = round_page(size);
379
380	/*
381	 * on first call to this function, initialize ourselves.
382	 */
383	if (initialized == FALSE) {
384		pmap_virtual_space(&virtual_space_start, &virtual_space_end);
385
386		/* round it the way we like it */
387		virtual_space_start = round_page(virtual_space_start);
388		virtual_space_end = trunc_page(virtual_space_end);
389
390		initialized = TRUE;
391	}
392
393	/*
394	 * allocate virtual memory for this request
395	 */
396	if (virtual_space_start == virtual_space_end ||
397	    (virtual_space_end - virtual_space_start) < size)
398		panic("uvm_pageboot_alloc: out of virtual space");
399
400	addr = virtual_space_start;
401
402#ifdef PMAP_GROWKERNEL
403	/*
404	 * If the kernel pmap can't map the requested space,
405	 * then allocate more resources for it.
406	 */
407	if (uvm_maxkaddr < (addr + size)) {
408		uvm_maxkaddr = pmap_growkernel(addr + size);
409		if (uvm_maxkaddr < (addr + size))
410			panic("uvm_pageboot_alloc: pmap_growkernel() failed");
411	}
412#endif
413
414	virtual_space_start += size;
415
416	/*
417	 * allocate and mapin physical pages to back new virtual pages
418	 */
419
420	for (vaddr = round_page(addr) ; vaddr < addr + size ;
421	    vaddr += PAGE_SIZE) {
422
423		if (!uvm_page_physget(&paddr))
424			panic("uvm_pageboot_alloc: out of memory");
425
426		/*
427		 * Note this memory is no longer managed, so using
428		 * pmap_kenter is safe.
429		 */
430		pmap_kenter_pa(vaddr, paddr, VM_PROT_READ|VM_PROT_WRITE);
431	}
432	pmap_update(pmap_kernel());
433	return(addr);
434#endif	/* PMAP_STEAL_MEMORY */
435}
436
437#if !defined(PMAP_STEAL_MEMORY)
438/*
439 * uvm_page_physget: "steal" one page from the vm_physmem structure.
440 *
441 * => attempt to allocate it off the end of a segment in which the "avail"
442 *    values match the start/end values.   if we can't do that, then we
443 *    will advance both values (making them equal, and removing some
444 *    vm_page structures from the non-avail area).
445 * => return false if out of memory.
446 */
447
448boolean_t
449uvm_page_physget(paddr_t *paddrp)
450{
451	int lcv, x;
452
453	/* pass 1: try allocating from a matching end */
454#if (VM_PHYSSEG_STRAT == VM_PSTRAT_BIGFIRST) || \
455	(VM_PHYSSEG_STRAT == VM_PSTRAT_BSEARCH)
456	for (lcv = vm_nphysseg - 1 ; lcv >= 0 ; lcv--)
457#else
458	for (lcv = 0 ; lcv < vm_nphysseg ; lcv++)
459#endif
460	{
461
462		if (uvm.page_init_done == TRUE)
463			panic("uvm_page_physget: called _after_ bootstrap");
464
465		/* try from front */
466		if (vm_physmem[lcv].avail_start == vm_physmem[lcv].start &&
467		    vm_physmem[lcv].avail_start < vm_physmem[lcv].avail_end) {
468			*paddrp = ptoa(vm_physmem[lcv].avail_start);
469			vm_physmem[lcv].avail_start++;
470			vm_physmem[lcv].start++;
471			/* nothing left?   nuke it */
472			if (vm_physmem[lcv].avail_start ==
473			    vm_physmem[lcv].end) {
474				if (vm_nphysseg == 1)
475				    panic("uvm_page_physget: out of memory!");
476				vm_nphysseg--;
477				for (x = lcv ; x < vm_nphysseg ; x++)
478					/* structure copy */
479					vm_physmem[x] = vm_physmem[x+1];
480			}
481			return (TRUE);
482		}
483
484		/* try from rear */
485		if (vm_physmem[lcv].avail_end == vm_physmem[lcv].end &&
486		    vm_physmem[lcv].avail_start < vm_physmem[lcv].avail_end) {
487			*paddrp = ptoa(vm_physmem[lcv].avail_end - 1);
488			vm_physmem[lcv].avail_end--;
489			vm_physmem[lcv].end--;
490			/* nothing left?   nuke it */
491			if (vm_physmem[lcv].avail_end ==
492			    vm_physmem[lcv].start) {
493				if (vm_nphysseg == 1)
494				    panic("uvm_page_physget: out of memory!");
495				vm_nphysseg--;
496				for (x = lcv ; x < vm_nphysseg ; x++)
497					/* structure copy */
498					vm_physmem[x] = vm_physmem[x+1];
499			}
500			return (TRUE);
501		}
502	}
503
504	/* pass 2: forget about matching ends, just allocate something */
505#if (VM_PHYSSEG_STRAT == VM_PSTRAT_BIGFIRST) || \
506	(VM_PHYSSEG_STRAT == VM_PSTRAT_BSEARCH)
507	for (lcv = vm_nphysseg - 1 ; lcv >= 0 ; lcv--)
508#else
509	for (lcv = 0 ; lcv < vm_nphysseg ; lcv++)
510#endif
511	{
512
513		/* any room in this bank? */
514		if (vm_physmem[lcv].avail_start >= vm_physmem[lcv].avail_end)
515			continue;  /* nope */
516
517		*paddrp = ptoa(vm_physmem[lcv].avail_start);
518		vm_physmem[lcv].avail_start++;
519		/* truncate! */
520		vm_physmem[lcv].start = vm_physmem[lcv].avail_start;
521
522		/* nothing left?   nuke it */
523		if (vm_physmem[lcv].avail_start == vm_physmem[lcv].end) {
524			if (vm_nphysseg == 1)
525				panic("uvm_page_physget: out of memory!");
526			vm_nphysseg--;
527			for (x = lcv ; x < vm_nphysseg ; x++)
528				/* structure copy */
529				vm_physmem[x] = vm_physmem[x+1];
530		}
531		return (TRUE);
532	}
533
534	return (FALSE);        /* whoops! */
535}
536
537#endif /* PMAP_STEAL_MEMORY */
538
539/*
540 * uvm_page_physload: load physical memory into VM system
541 *
542 * => all args are PFs
543 * => all pages in start/end get vm_page structures
544 * => areas marked by avail_start/avail_end get added to the free page pool
545 * => we are limited to VM_PHYSSEG_MAX physical memory segments
546 */
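/*
 * Example of a machine-dependent bootstrap call (addresses are made up for
 * illustration): a port that found usable RAM from 0x100000 to 0x4000000
 * would register all of it with
 *
 *	uvm_page_physload(atop(0x100000), atop(0x4000000),
 *	    atop(0x100000), atop(0x4000000), 0);
 *
 * passing page frame numbers throughout and flags of 0 for ordinary RAM
 * (PHYSLOAD_DEVICE marks device memory instead).
 */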
547
548void
549uvm_page_physload(paddr_t start, paddr_t end, paddr_t avail_start,
550    paddr_t avail_end, int flags)
551{
552	int preload, lcv;
553	psize_t npages;
554	struct vm_page *pgs;
555	struct vm_physseg *ps;
556
557	if (uvmexp.pagesize == 0)
558		panic("uvm_page_physload: page size not set!");
559
560	if (start >= end)
561		panic("uvm_page_physload: start >= end");
562
563	/*
564	 * do we have room?
565	 */
566	if (vm_nphysseg == VM_PHYSSEG_MAX) {
567		printf("uvm_page_physload: unable to load physical memory "
568		    "segment\n");
569		printf("\t%d segments allocated, ignoring 0x%llx -> 0x%llx\n",
570		    VM_PHYSSEG_MAX, (long long)start, (long long)end);
571		printf("\tincrease VM_PHYSSEG_MAX\n");
572		return;
573	}
574
575	/*
576	 * check to see if this is a "preload" (i.e. uvm_mem_init hasn't been
577	 * called yet, so malloc is not available).
578	 */
579	for (lcv = 0 ; lcv < vm_nphysseg ; lcv++) {
580		if (vm_physmem[lcv].pgs)
581			break;
582	}
583	preload = (lcv == vm_nphysseg);
584
585	/*
586	 * if VM is already running, attempt to malloc() vm_page structures
587	 */
588	if (!preload) {
589		/*
590		 * XXXCDC: need some sort of lockout for this case;
591		 * right now it is only used by devices so it should be all right.
592		 */
593 		paddr_t paddr;
594
595 		npages = end - start;  /* # of pages */
596
597		pgs = (struct vm_page *)uvm_km_zalloc(kernel_map,
598		    npages * sizeof(*pgs));
599		if (pgs == NULL) {
600			printf("uvm_page_physload: can not malloc vm_page "
601			    "structs for segment\n");
602			printf("\tignoring 0x%lx -> 0x%lx\n", start, end);
603			return;
604		}
605		/* init phys_addr and free pages, XXX uvmexp.npages */
606		for (lcv = 0, paddr = ptoa(start); lcv < npages;
607		    lcv++, paddr += PAGE_SIZE) {
608			pgs[lcv].phys_addr = paddr;
609#ifdef __HAVE_VM_PAGE_MD
610			VM_MDPAGE_INIT(&pgs[lcv]);
611#endif
612			if (atop(paddr) >= avail_start &&
613			    atop(paddr) <= avail_end) {
614				if (flags & PHYSLOAD_DEVICE) {
615					atomic_setbits_int(&pgs[lcv].pg_flags,
616					    PG_DEV);
617					pgs[lcv].wire_count = 1;
618				} else {
619#if defined(VM_PHYSSEG_NOADD)
620		panic("uvm_page_physload: tried to add RAM after vm_mem_init");
621#endif
622				}
623			}
624		}
625
626		/*
627		 * Add pages to free pool.
628		 */
629		if ((flags & PHYSLOAD_DEVICE) == 0) {
630			uvm_pmr_freepages(&pgs[avail_start - start],
631			    avail_end - avail_start);
632		}
633
634		/* XXXCDC: need hook to tell pmap to rebuild pv_list, etc... */
635	} else {
636
637		/* gcc complains if these don't get init'd */
638		pgs = NULL;
639		npages = 0;
640
641	}
642
643	/*
644	 * now insert us in the proper place in vm_physmem[]
645	 */
646
647#if (VM_PHYSSEG_STRAT == VM_PSTRAT_RANDOM)
648
649	/* random: put it at the end (easy!) */
650	ps = &vm_physmem[vm_nphysseg];
651
652#elif (VM_PHYSSEG_STRAT == VM_PSTRAT_BSEARCH)
653
654	{
655		int x;
656		/* sort by address for binary search */
657		for (lcv = 0 ; lcv < vm_nphysseg ; lcv++)
658			if (start < vm_physmem[lcv].start)
659				break;
660		ps = &vm_physmem[lcv];
661		/* move back other entries, if necessary ... */
662		for (x = vm_nphysseg ; x > lcv ; x--)
663			/* structure copy */
664			vm_physmem[x] = vm_physmem[x - 1];
665	}
666
667#elif (VM_PHYSSEG_STRAT == VM_PSTRAT_BIGFIRST)
668
669	{
670		int x;
671		/* sort by largest segment first */
672		for (lcv = 0 ; lcv < vm_nphysseg ; lcv++)
673			if ((end - start) >
674			    (vm_physmem[lcv].end - vm_physmem[lcv].start))
675				break;
676		ps = &vm_physmem[lcv];
677		/* move back other entries, if necessary ... */
678		for (x = vm_nphysseg ; x > lcv ; x--)
679			/* structure copy */
680			vm_physmem[x] = vm_physmem[x - 1];
681	}
682
683#else
684
685	panic("uvm_page_physload: unknown physseg strategy selected!");
686
687#endif
688
689	ps->start = start;
690	ps->end = end;
691	ps->avail_start = avail_start;
692	ps->avail_end = avail_end;
693	if (preload) {
694		ps->pgs = NULL;
695	} else {
696		ps->pgs = pgs;
697		ps->lastpg = pgs + npages - 1;
698	}
699	vm_nphysseg++;
700
701	/*
702	 * done!
703	 */
704
705	return;
706}
707
708#ifdef DDB /* XXXCDC: TMP TMP TMP DEBUG DEBUG DEBUG */
709
710void uvm_page_physdump(void); /* SHUT UP GCC */
711
712/* call from DDB */
713void
714uvm_page_physdump(void)
715{
716	int lcv;
717
718	printf("uvm_page_physdump: physical memory config [segs=%d of %d]:\n",
719	    vm_nphysseg, VM_PHYSSEG_MAX);
720	for (lcv = 0 ; lcv < vm_nphysseg ; lcv++)
721		printf("0x%llx->0x%llx [0x%llx->0x%llx]\n",
722		    (long long)vm_physmem[lcv].start,
723		    (long long)vm_physmem[lcv].end,
724		    (long long)vm_physmem[lcv].avail_start,
725		    (long long)vm_physmem[lcv].avail_end);
726	printf("STRATEGY = ");
727	switch (VM_PHYSSEG_STRAT) {
728	case VM_PSTRAT_RANDOM: printf("RANDOM\n"); break;
729	case VM_PSTRAT_BSEARCH: printf("BSEARCH\n"); break;
730	case VM_PSTRAT_BIGFIRST: printf("BIGFIRST\n"); break;
731	default: printf("<<UNKNOWN>>!!!!\n");
732	}
733}
734#endif
735
736void
737uvm_shutdown(void)
738{
739#ifdef UVM_SWAP_ENCRYPT
740	uvm_swap_finicrypt_all();
741#endif
742}
743
744/*
745 * Perform insert of a given page in the specified anon or obj.
746 * This is basically uvm_pagealloc, but with the page already given.
747 */
748void
749uvm_pagealloc_pg(struct vm_page *pg, struct uvm_object *obj, voff_t off,
750    struct vm_anon *anon)
751{
752	int	flags;
753
754	flags = PG_BUSY | PG_FAKE;
755	pg->offset = off;
756	pg->uobject = obj;
757	pg->uanon = anon;
758
759	if (anon) {
760		anon->an_page = pg;
761		flags |= PQ_ANON;
762	} else if (obj)
763		uvm_pageinsert(pg);
764	atomic_setbits_int(&pg->pg_flags, flags);
765#if defined(UVM_PAGE_TRKOWN)
766	pg->owner_tag = NULL;
767#endif
768	UVM_PAGE_OWN(pg, "new alloc");
769}
770
771/*
772 * uvm_pglistalloc: allocate a list of pages
773 *
774 * => allocated pages are placed at the tail of rlist.  rlist is
775 *    assumed to be properly initialized by caller.
776 * => returns 0 on success or errno on failure
777 * => doesn't take into account clean non-busy pages on inactive list
778 *	that could be used(?)
779 * => params:
780 *	size		the size of the allocation, rounded to page size.
781 *	low		the low address of the allowed allocation range.
782 *	high		the high address of the allowed allocation range.
783 *	alignment	memory must be aligned to this power-of-two boundary.
784 *	boundary	no segment in the allocation may cross this
785 *			power-of-two boundary (relative to zero).
786 * => flags:
787 *	UVM_PLA_NOWAIT	fail immediately if pages are not available
788 *	UVM_PLA_WAITOK	wait for memory to become avail
789 *	UVM_PLA_ZERO	return zeroed memory
790 */
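/*
 * A minimal usage sketch (the caller and constraint values are illustrative,
 * not from this file): allocate four physically contiguous, page-aligned
 * pages below 16MB, sleeping for memory if necessary:
 *
 *	struct pglist pl;
 *	int error;
 *
 *	TAILQ_INIT(&pl);
 *	error = uvm_pglistalloc(4 * PAGE_SIZE, 0, 0xffffff, PAGE_SIZE, 0,
 *	    &pl, 1, UVM_PLA_WAITOK);
 *
 * nsegs = 1 forces a single contiguous segment; the pages come back on the
 * caller-initialized pglist and are released with uvm_pglistfree().
 */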
791int
792uvm_pglistalloc(psize_t size, paddr_t low, paddr_t high, paddr_t alignment,
793    paddr_t boundary, struct pglist *rlist, int nsegs, int flags)
794{
795	KASSERT((alignment & (alignment - 1)) == 0);
796	KASSERT((boundary & (boundary - 1)) == 0);
797	KASSERT(!(flags & UVM_PLA_WAITOK) ^ !(flags & UVM_PLA_NOWAIT));
798
799	if (size == 0)
800		return (EINVAL);
801	/*
802	 * check to see if we need to generate some free pages by waking
803	 * the pagedaemon.
804	 */
805	if ((uvmexp.free - BUFPAGES_DEFICIT) < uvmexp.freemin ||
806	    ((uvmexp.free - BUFPAGES_DEFICIT) < uvmexp.freetarg &&
807	    (uvmexp.inactive + BUFPAGES_INACT) < uvmexp.inactarg))
808		wakeup(&uvm.pagedaemon);
809
810	/*
811	 * XXX uvm_pglistalloc is currently only used for kernel
812	 * objects. Unlike the checks in uvm_pagealloc, below, here
813	 * we are always allowed to use the kernel reserve. However, we
814	 * have to enforce the pagedaemon reserve here or allocations
815	 * via this path could consume everything and we can't
816	 * recover in the page daemon.
817	 */
818 again:
819	if ((uvmexp.free <= uvmexp.reserve_pagedaemon &&
820	    !((curproc == uvm.pagedaemon_proc) ||
821		(curproc == syncerproc)))) {
822		if (flags & UVM_PLA_WAITOK) {
823			uvm_wait("uvm_pglistalloc");
824			goto again;
825		}
826		return (ENOMEM);
827	}
828
829	if ((high & PAGE_MASK) != PAGE_MASK) {
830		printf("uvm_pglistalloc: Upper boundary 0x%lx "
831		    "not on pagemask.\n", (unsigned long)high);
832	}
833
834	/*
835	 * Our allocations are always page granularity, so our alignment
836	 * must be, too.
837	 */
838	if (alignment < PAGE_SIZE)
839		alignment = PAGE_SIZE;
840
841	low = atop(roundup(low, alignment));
842	/*
843	 * high + 1 may result in overflow, in which case high becomes 0x0,
844	 * which is the 'don't care' value.
845	 * The only requirement in that case is that low is also 0x0, or the
846	 * low<high assert will fail.
847	 */
848	high = atop(high + 1);
849	size = atop(round_page(size));
850	alignment = atop(alignment);
851	if (boundary < PAGE_SIZE && boundary != 0)
852		boundary = PAGE_SIZE;
853	boundary = atop(boundary);
854
855	return uvm_pmr_getpages(size, low, high, alignment, boundary, nsegs,
856	    flags, rlist);
857}
858
859/*
860 * uvm_pglistfree: free a list of pages
861 *
862 * => pages should already be unmapped
863 */
864void
865uvm_pglistfree(struct pglist *list)
866{
867	uvm_pmr_freepageq(list);
868}
869
870/*
871 * interface used by the buffer cache to allocate one buffer's worth of
872 * pages at a time.  The pages are allocated wired, in DMA-accessible memory.
873 */
874void
875uvm_pagealloc_multi(struct uvm_object *obj, voff_t off, vsize_t size,
876    int flags)
877{
878	struct pglist    plist;
879	struct vm_page  *pg;
880	int              i;
881
882
883	TAILQ_INIT(&plist);
884	(void) uvm_pglistalloc(size, dma_constraint.ucr_low,
885	    dma_constraint.ucr_high, 0, 0, &plist, atop(round_page(size)),
886	    UVM_PLA_WAITOK);
887	i = 0;
888	while ((pg = TAILQ_FIRST(&plist)) != NULL) {
889		pg->wire_count = 1;
890		atomic_setbits_int(&pg->pg_flags, PG_CLEAN | PG_FAKE);
891		KASSERT((pg->pg_flags & PG_DEV) == 0);
892		TAILQ_REMOVE(&plist, pg, pageq);
893		uvm_pagealloc_pg(pg, obj, off + ptoa(i++), NULL);
894	}
895}
896
897/*
898 * interface used by the buffer cache to reallocate one buffer's worth
899 * of pages at a time.  The pages are reallocated wired, outside the
900 * DMA-accessible region.
901 */
902void
903uvm_pagerealloc_multi(struct uvm_object *obj, voff_t off, vsize_t size,
904    int flags, struct uvm_constraint_range *where)
905{
906	struct pglist    plist;
907	struct vm_page  *pg, *tpg;
908	int              i;
909	voff_t		offset;
910
911
912	TAILQ_INIT(&plist);
913	if (size == 0)
914		panic("size 0 uvm_pagerealloc");
915	(void) uvm_pglistalloc(size, where->ucr_low, where->ucr_high, 0,
916	    0, &plist, atop(round_page(size)), UVM_PLA_WAITOK);
917	i = 0;
918	while((pg = TAILQ_FIRST(&plist)) != NULL) {
919		offset = off + ptoa(i++);
920		tpg = uvm_pagelookup(obj, offset);
921		pg->wire_count = 1;
922		atomic_setbits_int(&pg->pg_flags, PG_CLEAN | PG_FAKE);
923		KASSERT((pg->pg_flags & PG_DEV) == 0);
924		TAILQ_REMOVE(&plist, pg, pageq);
925		uvm_pagecopy(tpg, pg);
926		uvm_pagefree(tpg);
927		uvm_pagealloc_pg(pg, obj, offset, NULL);
928	}
929}
930
931/*
932 * uvm_pagealloc: allocate a vm_page from the free page pool.
933 *
934 * => return null if no pages free
935 * => wake up pagedaemon if number of free pages drops below low water mark
936 * => if obj != NULL, obj must be locked (to put in tree)
937 * => if anon != NULL, anon must be locked (to put in anon)
938 * => only one of obj or anon can be non-null
939 * => caller must activate/deactivate page if it is not wired.
940 */
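/*
 * A minimal usage sketch (the object, offset and wait loop are illustrative,
 * not from this file): allocate one zeroed page for a locked object,
 * sleeping until memory is available:
 *
 *	struct vm_page *pg;
 *
 *	while ((pg = uvm_pagealloc(uobj, off, NULL, UVM_PGA_ZERO)) == NULL)
 *		uvm_wait("pgalloc");
 *
 * The page comes back busy (PG_BUSY) and, because obj != NULL, already
 * inserted into the object's page tree at the given offset.
 */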
941
942struct vm_page *
943uvm_pagealloc(struct uvm_object *obj, voff_t off, struct vm_anon *anon,
944    int flags)
945{
946	struct vm_page *pg;
947	struct pglist pgl;
948	int pmr_flags;
949	boolean_t use_reserve;
950
951	KASSERT(obj == NULL || anon == NULL);
952	KASSERT(off == trunc_page(off));
953
954	/*
955	 * check to see if we need to generate some free pages by waking
956	 * the pagedaemon.
957	 */
958	if ((uvmexp.free - BUFPAGES_DEFICIT) < uvmexp.freemin ||
959	    ((uvmexp.free - BUFPAGES_DEFICIT) < uvmexp.freetarg &&
960	    (uvmexp.inactive + BUFPAGES_INACT) < uvmexp.inactarg))
961		wakeup(&uvm.pagedaemon);
962
963	/*
964	 * fail if any of these conditions is true:
965	 * [1]  there really are no free pages, or
966	 * [2]  only kernel "reserved" pages remain and
967	 *        the page isn't being allocated to a kernel object.
968	 * [3]  only pagedaemon "reserved" pages remain and
969	 *        the requestor isn't the pagedaemon.
970	 */
971
972	use_reserve = (flags & UVM_PGA_USERESERVE) ||
973		(obj && UVM_OBJ_IS_KERN_OBJECT(obj));
974	if ((uvmexp.free <= uvmexp.reserve_kernel && !use_reserve) ||
975	    (uvmexp.free <= uvmexp.reserve_pagedaemon &&
976	     !((curproc == uvm.pagedaemon_proc) ||
977	      (curproc == syncerproc))))
978		goto fail;
979
980	pmr_flags = UVM_PLA_NOWAIT;
981	if (flags & UVM_PGA_ZERO)
982		pmr_flags |= UVM_PLA_ZERO;
983	TAILQ_INIT(&pgl);
984	if (uvm_pmr_getpages(1, 0, 0, 1, 0, 1, pmr_flags, &pgl) != 0)
985		goto fail;
986
987	pg = TAILQ_FIRST(&pgl);
988	KASSERT(pg != NULL && TAILQ_NEXT(pg, pageq) == NULL);
989
990	uvm_pagealloc_pg(pg, obj, off, anon);
991	KASSERT((pg->pg_flags & PG_DEV) == 0);
992	atomic_setbits_int(&pg->pg_flags, PG_BUSY|PG_CLEAN|PG_FAKE);
993	if (flags & UVM_PGA_ZERO)
994		atomic_clearbits_int(&pg->pg_flags, PG_CLEAN);
995
996	return(pg);
997
998 fail:
999	return (NULL);
1000}
1001
1002/*
1003 * uvm_pagerealloc: reallocate a page from one object to another
1004 *
1005 * => both objects must be locked
1006 */
1007
1008void
1009uvm_pagerealloc(struct vm_page *pg, struct uvm_object *newobj, voff_t newoff)
1010{
1011
1012	/*
1013	 * remove it from the old object
1014	 */
1015
1016	if (pg->uobject) {
1017		uvm_pageremove(pg);
1018	}
1019
1020	/*
1021	 * put it in the new object
1022	 */
1023
1024	if (newobj) {
1025		pg->uobject = newobj;
1026		pg->offset = newoff;
1027		pg->pg_version++;
1028		uvm_pageinsert(pg);
1029	}
1030}
1031
1032
1033/*
1034 * uvm_pagefree: free page
1035 *
1036 * => erase page's identity (i.e. remove from object)
1037 * => put page on free list
1038 * => caller must lock owning object (either anon or uvm_object)
1039 * => caller must lock page queues
1040 * => assumes all valid mappings of pg are gone
1041 */
1042
1043void
1044uvm_pagefree(struct vm_page *pg)
1045{
1046	int saved_loan_count = pg->loan_count;
1047
1048#ifdef DEBUG
1049	if (pg->uobject == (void *)0xdeadbeef &&
1050	    pg->uanon == (void *)0xdeadbeef) {
1051		panic("uvm_pagefree: freeing free page %p", pg);
1052	}
1053#endif
1054
1055	KASSERT((pg->pg_flags & PG_DEV) == 0);
1056
1057	/*
1058	 * if the page was an object page (and thus "TABLED"), remove it
1059	 * from the object.
1060	 */
1061
1062	if (pg->pg_flags & PG_TABLED) {
1063
1064		/*
1065		 * if the object page is on loan we are going to drop ownership.
1066		 * it is possible that an anon will take over as owner for this
1067		 * page later on.   the anon will want a !PG_CLEAN page so that
1068		 * it knows it needs to allocate swap if it wants to page the
1069		 * page out.
1070		 */
1071
1072		/* in case an anon takes over */
1073		if (saved_loan_count)
1074			atomic_clearbits_int(&pg->pg_flags, PG_CLEAN);
1075		uvm_pageremove(pg);
1076
1077		/*
1078		 * if our page was on loan, then we just lost control over it
1079		 * (in fact, if it was loaned to an anon, the anon may have
1080		 * already taken over ownership of the page by now and thus
1081		 * changed the loan_count [e.g. in uvmfault_anonget()]), so we
1082		 * just return (when the last loan is dropped, the page can be
1083		 * freed by whatever was holding the last loan).
1084		 */
1085
1086		if (saved_loan_count)
1087			return;
1088	} else if (saved_loan_count && pg->uanon) {
1089		/*
1090		 * if our page is owned by an anon and is loaned out to the
1091		 * kernel then we just want to drop ownership and return.
1092		 * the kernel must free the page when all its loans clear ...
1093		 * note that the kernel can't change the loan status of our
1094		 * page as long as we are holding PQ lock.
1095		 */
1096		atomic_clearbits_int(&pg->pg_flags, PQ_ANON);
1097		pg->uanon->an_page = NULL;
1098		pg->uanon = NULL;
1099		return;
1100	}
1101	KASSERT(saved_loan_count == 0);
1102
1103	/*
1104	 * now remove the page from the queues
1105	 */
1106
1107	if (pg->pg_flags & PQ_ACTIVE) {
1108		TAILQ_REMOVE(&uvm.page_active, pg, pageq);
1109		atomic_clearbits_int(&pg->pg_flags, PQ_ACTIVE);
1110		uvmexp.active--;
1111	}
1112	if (pg->pg_flags & PQ_INACTIVE) {
1113		if (pg->pg_flags & PQ_SWAPBACKED)
1114			TAILQ_REMOVE(&uvm.page_inactive_swp, pg, pageq);
1115		else
1116			TAILQ_REMOVE(&uvm.page_inactive_obj, pg, pageq);
1117		atomic_clearbits_int(&pg->pg_flags, PQ_INACTIVE);
1118		uvmexp.inactive--;
1119	}
1120
1121	/*
1122	 * if the page was wired, unwire it now.
1123	 */
1124
1125	if (pg->wire_count) {
1126		pg->wire_count = 0;
1127		uvmexp.wired--;
1128	}
1129	if (pg->uanon) {
1130		pg->uanon->an_page = NULL;
1131		pg->uanon = NULL;
1132		atomic_clearbits_int(&pg->pg_flags, PQ_ANON);
1133	}
1134
1135	/*
1136	 * Clean page state bits.
1137	 */
1138	atomic_clearbits_int(&pg->pg_flags, PQ_AOBJ); /* XXX: find culprit */
1139	atomic_clearbits_int(&pg->pg_flags, PQ_ENCRYPT|
1140	    PG_ZERO|PG_FAKE|PG_BUSY|PG_RELEASED|PG_CLEAN|PG_CLEANCHK);
1141
1142	/*
1143	 * and put on free queue
1144	 */
1145
1146#ifdef DEBUG
1147	pg->uobject = (void *)0xdeadbeef;
1148	pg->offset = 0xdeadbeef;
1149	pg->uanon = (void *)0xdeadbeef;
1150#endif
1151
1152	uvm_pmr_freepages(pg, 1);
1153
1154	if (uvmexp.zeropages < UVM_PAGEZERO_TARGET)
1155		uvm.page_idle_zero = vm_page_zero_enable;
1156}
1157
1158/*
1159 * uvm_page_unbusy: unbusy an array of pages.
1160 *
1161 * => pages must either all belong to the same object, or all belong to anons.
1162 * => if pages are object-owned, object must be locked.
1163 * => if pages are anon-owned, anons must be unlocked and have 0 refcount.
1164 */
1165
1166void
1167uvm_page_unbusy(struct vm_page **pgs, int npgs)
1168{
1169	struct vm_page *pg;
1170	struct uvm_object *uobj;
1171	int i;
1172
1173	for (i = 0; i < npgs; i++) {
1174		pg = pgs[i];
1175
1176		if (pg == NULL || pg == PGO_DONTCARE) {
1177			continue;
1178		}
1179		if (pg->pg_flags & PG_WANTED) {
1180			wakeup(pg);
1181		}
1182		if (pg->pg_flags & PG_RELEASED) {
1183			uobj = pg->uobject;
1184			if (uobj != NULL) {
1185				uvm_lock_pageq();
1186				pmap_page_protect(pg, VM_PROT_NONE);
1187				/* XXX won't happen right now */
1188				if (pg->pg_flags & PQ_AOBJ)
1189					uao_dropswap(uobj,
1190					    pg->offset >> PAGE_SHIFT);
1191				uvm_pagefree(pg);
1192				uvm_unlock_pageq();
1193			} else {
1194				atomic_clearbits_int(&pg->pg_flags, PG_BUSY);
1195				UVM_PAGE_OWN(pg, NULL);
1196				uvm_anfree(pg->uanon);
1197			}
1198		} else {
1199			atomic_clearbits_int(&pg->pg_flags, PG_WANTED|PG_BUSY);
1200			UVM_PAGE_OWN(pg, NULL);
1201		}
1202	}
1203}
1204
1205#if defined(UVM_PAGE_TRKOWN)
1206/*
1207 * uvm_page_own: set or release page ownership
1208 *
1209 * => this is a debugging function that keeps track of who sets PG_BUSY
1210 *	and where they do it.   it can be used to track down problems
1211 *	such as a process setting "PG_BUSY" and never releasing it.
1212 * => page's object [if any] must be locked
1213 * => if "tag" is NULL then we are releasing page ownership
1214 */
1215void
1216uvm_page_own(struct vm_page *pg, char *tag)
1217{
1218	/* gain ownership? */
1219	if (tag) {
1220		if (pg->owner_tag) {
1221			printf("uvm_page_own: page %p already owned "
1222			    "by proc %d [%s]\n", pg,
1223			     pg->owner, pg->owner_tag);
1224			panic("uvm_page_own");
1225		}
1226		pg->owner = (curproc) ? curproc->p_pid :  (pid_t) -1;
1227		pg->owner_tag = tag;
1228		return;
1229	}
1230
1231	/* drop ownership */
1232	if (pg->owner_tag == NULL) {
1233		printf("uvm_page_own: dropping ownership of a non-owned "
1234		    "page (%p)\n", pg);
1235		panic("uvm_page_own");
1236	}
1237	pg->owner_tag = NULL;
1238	return;
1239}
1240#endif
1241
1242/*
1243 * uvm_pageidlezero: zero free pages while the system is idle.
1244 *
1245 * => we do at least one iteration per call, if we are below the target.
1246 * => we loop until we either reach the target or whichqs indicates that
1247 *	there is a process ready to run.
1248 */
1249void
1250uvm_pageidlezero(void)
1251{
1252#if 0 /* disabled: need new code */
1253	struct vm_page *pg;
1254	struct pgfreelist *pgfl;
1255	int free_list;
1256
1257	do {
1258		uvm_lock_fpageq();
1259
1260		if (uvmexp.zeropages >= UVM_PAGEZERO_TARGET) {
1261			uvm.page_idle_zero = FALSE;
1262			uvm_unlock_fpageq();
1263			return;
1264		}
1265
1266		for (free_list = 0; free_list < VM_NFREELIST; free_list++) {
1267			pgfl = &uvm.page_free[free_list];
1268			if ((pg = TAILQ_FIRST(&pgfl->pgfl_queues[
1269			    PGFL_UNKNOWN])) != NULL)
1270				break;
1271		}
1272
1273		if (pg == NULL) {
1274			/*
1275			 * No non-zero'd pages; don't bother trying again
1276			 * until we know we have non-zero'd pages free.
1277			 */
1278			uvm.page_idle_zero = FALSE;
1279			uvm_unlock_fpageq();
1280			return;
1281		}
1282
1283		TAILQ_REMOVE(&pgfl->pgfl_queues[PGFL_UNKNOWN], pg, pageq);
1284		uvmexp.free--;
1285		uvm_unlock_fpageq();
1286
1287#ifdef PMAP_PAGEIDLEZERO
1288		if (PMAP_PAGEIDLEZERO(pg) == FALSE) {
1289			/*
1290			 * The machine-dependent code detected some
1291			 * reason for us to abort zeroing pages,
1292			 * probably because there is a process now
1293			 * ready to run.
1294			 */
1295			uvm_lock_fpageq();
1296			TAILQ_INSERT_HEAD(&pgfl->pgfl_queues[PGFL_UNKNOWN],
1297			    pg, pageq);
1298			uvmexp.free++;
1299			uvmexp.zeroaborts++;
1300			uvm_unlock_fpageq();
1301			return;
1302		}
1303#else
1304		/*
1305		 * XXX This will toast the cache unless the pmap_zero_page()
1306		 * XXX implementation does uncached access.
1307		 */
1308		pmap_zero_page(pg);
1309#endif
1310		atomic_setbits_int(&pg->pg_flags, PG_ZERO);
1311
1312		uvm_lock_fpageq();
1313		TAILQ_INSERT_HEAD(&pgfl->pgfl_queues[PGFL_ZEROS], pg, pageq);
1314		uvmexp.free++;
1315		uvmexp.zeropages++;
1316		uvm_unlock_fpageq();
1317	} while (curcpu_is_idle());
1318#endif /* 0 */
1319}
1320
1321/*
1322 * when VM_PHYSSEG_MAX is 1, we can simplify these functions
1323 */
1324
1325#if VM_PHYSSEG_MAX > 1
1326/*
1327 * vm_physseg_find: find vm_physseg structure that belongs to a PA
1328 */
1329int
1330vm_physseg_find(paddr_t pframe, int *offp)
1331{
1332
1333#if (VM_PHYSSEG_STRAT == VM_PSTRAT_BSEARCH)
1334	/* binary search for it */
1335	int	start, len, try;
1336
1337	/*
1338	 * if try is too large (thus target is less than try) we reduce
1339	 * the length to trunc(len/2) [i.e. everything smaller than "try"]
1340	 *
1341	 * if the try is too small (thus target is greater than try) then
1342	 * we set the new start to be (try + 1).   this means we need to
1343	 * reduce the length to (round(len/2) - 1).
1344	 *
1345	 * note "adjust" below which takes advantage of the fact that
1346	 *  (round(len/2) - 1) == trunc((len - 1) / 2)
1347	 * for any value of len we may have
1348	 */
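	/*
	 * Worked example (made-up segment layout): with two segments
	 * [0x000, 0x100) and [0x200, 0x400), a lookup of pframe 0x250
	 * tries entry start + len/2 = 1, sees 0x250 >= 0x200 and
	 * 0x250 < 0x400, and returns segment 1 with *offp = 0x50.
	 */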
1349
1350	for (start = 0, len = vm_nphysseg ; len != 0 ; len = len / 2) {
1351		try = start + (len / 2);	/* try in the middle */
1352
1353		/* start past our try? */
1354		if (pframe >= vm_physmem[try].start) {
1355			/* was try correct? */
1356			if (pframe < vm_physmem[try].end) {
1357				if (offp)
1358					*offp = pframe - vm_physmem[try].start;
1359				return(try);            /* got it */
1360			}
1361			start = try + 1;	/* next time, start here */
1362			len--;			/* "adjust" */
1363		} else {
1364			/*
1365			 * pframe before try, just reduce length of
1366			 * region, done in "for" loop
1367			 */
1368		}
1369	}
1370	return(-1);
1371
1372#else
1373	/* linear search for it */
1374	int	lcv;
1375
1376	for (lcv = 0; lcv < vm_nphysseg; lcv++) {
1377		if (pframe >= vm_physmem[lcv].start &&
1378		    pframe < vm_physmem[lcv].end) {
1379			if (offp)
1380				*offp = pframe - vm_physmem[lcv].start;
1381			return(lcv);		   /* got it */
1382		}
1383	}
1384	return(-1);
1385
1386#endif
1387}
1388
1389/*
1390 * PHYS_TO_VM_PAGE: find vm_page for a PA.   used by MI code to get vm_pages
1391 * back from an I/O mapping (ugh!).   used in some MD code as well.
1392 */
1393struct vm_page *
1394PHYS_TO_VM_PAGE(paddr_t pa)
1395{
1396	paddr_t pf = atop(pa);
1397	int	off;
1398	int	psi;
1399
1400	psi = vm_physseg_find(pf, &off);
1401
1402	return ((psi == -1) ? NULL : &vm_physmem[psi].pgs[off]);
1403}
1404#endif /* VM_PHYSSEG_MAX > 1 */
1405
1406/*
1407 * uvm_pagelookup: look up a page
1408 *
1409 * => caller should lock object to keep someone from pulling the page
1410 *	out from under it
1411 */
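/*
 * The lookup below fills in only the RB key (the offset) of a throw-away
 * vm_page on the stack and hands it to RB_FIND, which compares pages purely
 * through uvm_pagecmp() and therefore never looks at the other,
 * uninitialized fields.
 */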
1412struct vm_page *
1413uvm_pagelookup(struct uvm_object *obj, voff_t off)
1414{
1415	/* XXX if stack is too much, handroll */
1416	struct vm_page pg;
1417
1418	pg.offset = off;
1419	return (RB_FIND(uvm_objtree, &obj->memt, &pg));
1420}
1421
1422/*
1423 * uvm_pagewire: wire the page, thus removing it from the daemon's grasp
1424 *
1425 * => caller must lock page queues
1426 */
1427void
1428uvm_pagewire(struct vm_page *pg)
1429{
1430	if (pg->wire_count == 0) {
1431		if (pg->pg_flags & PQ_ACTIVE) {
1432			TAILQ_REMOVE(&uvm.page_active, pg, pageq);
1433			atomic_clearbits_int(&pg->pg_flags, PQ_ACTIVE);
1434			uvmexp.active--;
1435		}
1436		if (pg->pg_flags & PQ_INACTIVE) {
1437			if (pg->pg_flags & PQ_SWAPBACKED)
1438				TAILQ_REMOVE(&uvm.page_inactive_swp, pg, pageq);
1439			else
1440				TAILQ_REMOVE(&uvm.page_inactive_obj, pg, pageq);
1441			atomic_clearbits_int(&pg->pg_flags, PQ_INACTIVE);
1442			uvmexp.inactive--;
1443		}
1444		uvmexp.wired++;
1445	}
1446	pg->wire_count++;
1447}
1448
1449/*
1450 * uvm_pageunwire: unwire the page.
1451 *
1452 * => activate if wire count goes to zero.
1453 * => caller must lock page queues
1454 */
1455void
1456uvm_pageunwire(struct vm_page *pg)
1457{
1458	pg->wire_count--;
1459	if (pg->wire_count == 0) {
1460		TAILQ_INSERT_TAIL(&uvm.page_active, pg, pageq);
1461		uvmexp.active++;
1462		atomic_setbits_int(&pg->pg_flags, PQ_ACTIVE);
1463		uvmexp.wired--;
1464	}
1465}
1466
1467/*
1468 * uvm_pagedeactivate: deactivate page -- no pmaps have access to page
1469 *
1470 * => caller must lock page queues
1471 * => caller must check to make sure page is not wired
1472 * => object that page belongs to must be locked (so we can adjust pg->flags)
1473 */
1474void
1475uvm_pagedeactivate(struct vm_page *pg)
1476{
1477	if (pg->pg_flags & PQ_ACTIVE) {
1478		TAILQ_REMOVE(&uvm.page_active, pg, pageq);
1479		atomic_clearbits_int(&pg->pg_flags, PQ_ACTIVE);
1480		uvmexp.active--;
1481	}
1482	if ((pg->pg_flags & PQ_INACTIVE) == 0) {
1483		KASSERT(pg->wire_count == 0);
1484		if (pg->pg_flags & PQ_SWAPBACKED)
1485			TAILQ_INSERT_TAIL(&uvm.page_inactive_swp, pg, pageq);
1486		else
1487			TAILQ_INSERT_TAIL(&uvm.page_inactive_obj, pg, pageq);
1488		atomic_setbits_int(&pg->pg_flags, PQ_INACTIVE);
1489		uvmexp.inactive++;
1490		pmap_clear_reference(pg);
1491		/*
1492		 * update the "clean" bit.  this isn't 100%
1493		 * accurate, and doesn't have to be.  we'll
1494		 * re-sync it after we zap all mappings when
1495		 * scanning the inactive list.
1496		 */
1497		if ((pg->pg_flags & PG_CLEAN) != 0 &&
1498		    pmap_is_modified(pg))
1499			atomic_clearbits_int(&pg->pg_flags, PG_CLEAN);
1500	}
1501}
1502
1503/*
1504 * uvm_pageactivate: activate page
1505 *
1506 * => caller must lock page queues
1507 */
1508void
1509uvm_pageactivate(struct vm_page *pg)
1510{
1511	if (pg->pg_flags & PQ_INACTIVE) {
1512		if (pg->pg_flags & PQ_SWAPBACKED)
1513			TAILQ_REMOVE(&uvm.page_inactive_swp, pg, pageq);
1514		else
1515			TAILQ_REMOVE(&uvm.page_inactive_obj, pg, pageq);
1516		atomic_clearbits_int(&pg->pg_flags, PQ_INACTIVE);
1517		uvmexp.inactive--;
1518	}
1519	if (pg->wire_count == 0) {
1520
1521		/*
1522		 * if page is already active, remove it from list so we
1523		 * can put it at tail.  if it wasn't active, then mark
1524		 * it active and bump active count
1525		 */
1526		if (pg->pg_flags & PQ_ACTIVE)
1527			TAILQ_REMOVE(&uvm.page_active, pg, pageq);
1528		else {
1529			atomic_setbits_int(&pg->pg_flags, PQ_ACTIVE);
1530			uvmexp.active++;
1531		}
1532
1533		TAILQ_INSERT_TAIL(&uvm.page_active, pg, pageq);
1534	}
1535}
1536
1537/*
1538 * uvm_pagezero: zero fill a page
1539 *
1540 * => if page is part of an object then the object should be locked
1541 *	to protect pg->flags.
1542 */
1543void
1544uvm_pagezero(struct vm_page *pg)
1545{
1546	atomic_clearbits_int(&pg->pg_flags, PG_CLEAN);
1547	pmap_zero_page(pg);
1548}
1549
1550/*
1551 * uvm_pagecopy: copy a page
1552 *
1553 * => if page is part of an object then the object should be locked
1554 *	to protect pg->flags.
1555 */
1556void
1557uvm_pagecopy(struct vm_page *src, struct vm_page *dst)
1558{
1559	atomic_clearbits_int(&dst->pg_flags, PG_CLEAN);
1560	pmap_copy_page(src, dst);
1561}
1562
1563/*
1564 * uvm_pagecount: count the number of physical pages in the address range.
1565 */
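/*
 * For example (illustrative caller): uvm_pagecount(&dma_constraint) yields
 * the number of managed pages whose addresses fall inside the DMA-reachable
 * range, i.e. the pages that functions such as uvm_pagealloc_multi() above
 * are allowed to allocate from.
 */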
1566psize_t
1567uvm_pagecount(struct uvm_constraint_range* constraint)
1568{
1569	int lcv;
1570	psize_t sz;
1571	paddr_t low, high;
1572	paddr_t ps_low, ps_high;
1573
1574	/* Algorithm uses page numbers. */
1575	low = atop(constraint->ucr_low);
1576	high = atop(constraint->ucr_high);
1577
1578	sz = 0;
1579	for (lcv = 0; lcv < vm_nphysseg; lcv++) {
1580		ps_low = MAX(low, vm_physmem[lcv].avail_start);
1581		ps_high = MIN(high, vm_physmem[lcv].avail_end);
1582		if (ps_low < ps_high)
1583			sz += ps_high - ps_low;
1584	}
1585	return sz;
1586}
1587