/*
 * Copyright (C) 1995, 1996 Wolfgang Solfrank.
 * Copyright (C) 1995, 1996 TooLs GmbH.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by TooLs GmbH.
 * 4. The name of TooLs GmbH may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY TOOLS GMBH ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL TOOLS GMBH BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
 * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
 * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
 * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
 * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * $NetBSD: pmap.c,v 1.28 2000/03/26 20:42:36 kleink Exp $
 */
/*
 * Copyright (C) 2001 Benno Rice.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY Benno Rice ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL TOOLS GMBH BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
 * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
 * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
 * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
 * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#ifndef lint
static const char rcsid[] =
  "$FreeBSD: head/sys/powerpc/aim/mmu_oea.c 77957 2001-06-10 02:39:37Z benno $";
#endif /* not lint */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/proc.h>
#include <sys/malloc.h>
#include <sys/msgbuf.h>
#include <sys/vmmeter.h>
#include <sys/mman.h>
#include <sys/queue.h>
#include <sys/mutex.h>

#include <vm/vm.h>
#include <vm/vm_param.h>
#include <sys/lock.h>
#include <vm/vm_kern.h>
#include <vm/vm_page.h>
#include <vm/vm_map.h>
#include <vm/vm_object.h>
#include <vm/vm_extern.h>
#include <vm/vm_pageout.h>
#include <vm/vm_pager.h>
#include <vm/vm_zone.h>

#include <sys/user.h>

#include <machine/pcb.h>
#include <machine/powerpc.h>
#include <machine/pte.h>

pte_t	*ptable;
int	ptab_cnt;
u_int	ptab_mask;
#define	HTABSIZE	(ptab_cnt * 64)
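
/*
 * The factor of 64 comes from the hashed page table layout: each PTE group
 * (PTEG) holds 8 PTEs of 8 bytes each, which is why pte_insert() and the
 * lookup loops below walk "ptable + idx * 8" over 8 slots.  ptab_cnt is
 * rounded up to a power of two in pmap_bootstrap(), so ptab_mask is simply
 * ptab_cnt - 1.
 */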

#define	MINPV		2048

struct pte_ovfl {
	LIST_ENTRY(pte_ovfl) po_list;	/* Linked list of overflow entries */
	struct pte	po_pte;		/* PTE for this mapping */
};

LIST_HEAD(pte_ovtab, pte_ovfl) *potable; /* Overflow entries for ptable */

static struct pmap	kernel_pmap_store;
pmap_t			kernel_pmap;

static int	npgs;
static u_int	nextavail;

#ifndef MSGBUFADDR
extern vm_offset_t	msgbuf_paddr;
#endif

static struct mem_region	*mem, *avail;

vm_offset_t	avail_start;
vm_offset_t	avail_end;
vm_offset_t	virtual_avail;
vm_offset_t	virtual_end;

vm_offset_t	kernel_vm_end;

static int	pmap_pagedaemon_waken = 0;

extern unsigned int	Maxmem;

#define	ATTRSHFT	4

struct pv_entry	*pv_table;

static vm_zone_t	pvzone;
static struct vm_zone	pvzone_store;
static struct vm_object	pvzone_obj;
static int		pv_entry_count=0, pv_entry_max=0, pv_entry_high_water=0;
static struct pv_entry	*pvinit;

#if !defined(PMAP_SHPGPERPROC)
#define	PMAP_SHPGPERPROC	200
#endif

struct pv_page;
struct pv_page_info {
	LIST_ENTRY(pv_page) pgi_list;
	struct pv_entry	*pgi_freelist;
	int		pgi_nfree;
};
#define	NPVPPG	((PAGE_SIZE - sizeof(struct pv_page_info)) / sizeof(struct pv_entry))
struct pv_page {
	struct pv_page_info	pvp_pgi;
	struct pv_entry		pvp_pv[NPVPPG];
};
LIST_HEAD(pv_page_list, pv_page) pv_page_freelist;
int	pv_nfree;
int	pv_pcnt;
static struct pv_entry	*pmap_alloc_pv(void);
static void		pmap_free_pv(struct pv_entry *);

struct po_page;
struct po_page_info {
	LIST_ENTRY(po_page) pgi_list;
	vm_page_t	pgi_page;
	LIST_HEAD(po_freelist, pte_ovfl) pgi_freelist;
	int		pgi_nfree;
};
#define	NPOPPG	((PAGE_SIZE - sizeof(struct po_page_info)) / sizeof(struct pte_ovfl))
struct po_page {
	struct po_page_info	pop_pgi;
	struct pte_ovfl		pop_po[NPOPPG];
};
LIST_HEAD(po_page_list, po_page) po_page_freelist;
int	po_nfree;
int	po_pcnt;
static struct pte_ovfl	*poalloc(void);
static void		pofree(struct pte_ovfl *, int);

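/*
 * Each pmap is assigned a block of 16 consecutive segment register values
 * (one VSID per 256MB segment).  usedsr has one bit per such block;
 * pmap_pinit() and pmap_release() allocate and free VSID space by setting
 * and clearing bits here.
 */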
static u_int	usedsr[NPMAPS / sizeof(u_int) / 8];

static int	pmap_initialized;

int	pte_spill(vm_offset_t);

/*
 * These small routines may have to be replaced,
 * if/when we support processors other than the 604.
 */
static __inline void
tlbie(vm_offset_t ea)
{

	__asm __volatile ("tlbie %0" :: "r"(ea));
}

static __inline void
tlbsync(void)
{

	__asm __volatile ("sync; tlbsync; sync");
}

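/*
 * tlbia() below does not use the (optional) tlbia instruction; instead it
 * issues one tlbie per 4KB page over a 256KB range.  Since the TLB set is
 * selected from low-order page-index bits of the EA, 64 iterations should
 * cover every congruence class on the 603/604-class parts this file is
 * written for; that assumption would need revisiting for other CPUs.
 */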
static __inline void
tlbia(void)
{
	vm_offset_t	i;

	__asm __volatile ("sync");
	for (i = 0; i < (vm_offset_t)0x00040000; i += 0x00001000) {
		tlbie(i);
	}
	tlbsync();
}

static __inline int
ptesr(sr_t *sr, vm_offset_t addr)
{

	return sr[(u_int)addr >> ADDR_SR_SHFT];
}

static __inline int
pteidx(sr_t sr, vm_offset_t addr)
{
	int	hash;

	hash = (sr & SR_VSID) ^ (((u_int)addr & ADDR_PIDX) >> ADDR_PIDX_SHFT);
	return hash & ptab_mask;
}
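
/*
 * The hashed page table is probed with two hash functions: pteidx() above
 * is the primary hash of (VSID, page index), and the secondary hash is the
 * primary hash XORed with ptab_mask.  Entries installed via the secondary
 * hash carry PTE_HID, which is why pte_insert() and the lookup loops below
 * probe both "idx" and "idx ^ ptab_mask".
 */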

static __inline int
ptematch(pte_t *ptp, sr_t sr, vm_offset_t va, int which)
{

	return ptp->pte_hi == (((sr & SR_VSID) << PTE_VSID_SHFT) |
	    (((u_int)va >> ADDR_API_SHFT) & PTE_API) | which);
}

static __inline struct pv_entry *
pa_to_pv(vm_offset_t pa)
{
#if 0 /* XXX */
	int	bank, pg;

	bank = vm_physseg_find(atop(pa), &pg);
	if (bank == -1)
		return NULL;
	return &vm_physmem[bank].pmseg.pvent[pg];
#endif
	return (NULL);
}

static __inline char *
pa_to_attr(vm_offset_t pa)
{
#if 0 /* XXX */
	int	bank, pg;

	bank = vm_physseg_find(atop(pa), &pg);
	if (bank == -1)
		return NULL;
	return &vm_physmem[bank].pmseg.attrs[pg];
#endif
	return (NULL);
}

/*
 * Try to insert page table entry *pt into the ptable at idx.
 *
 * Note: *pt mustn't have PTE_VALID set.
 * This is done here as required by Book III, 4.12.
 */
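/*
 * Update protocol: the PTE image is copied in while PTE_VALID is clear, a
 * sync makes those stores visible, and only then is PTE_VALID set, so the
 * table-walk hardware never sees a half-written entry.
 */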
static int
pte_insert(int idx, pte_t *pt)
{
	pte_t	*ptp;
	int	i;

	/*
	 * First try primary hash.
	 */
	for (ptp = ptable + idx * 8, i = 8; --i >= 0; ptp++) {
		if (!(ptp->pte_hi & PTE_VALID)) {
			*ptp = *pt;
			ptp->pte_hi &= ~PTE_HID;
			__asm __volatile ("sync");
			ptp->pte_hi |= PTE_VALID;
			return 1;
		}
	}

	/*
	 * Then try secondary hash.
	 */

	idx ^= ptab_mask;

	for (ptp = ptable + idx * 8, i = 8; --i >= 0; ptp++) {
		if (!(ptp->pte_hi & PTE_VALID)) {
			*ptp = *pt;
			ptp->pte_hi |= PTE_HID;
			__asm __volatile ("sync");
			ptp->pte_hi |= PTE_VALID;
			return 1;
		}
	}

	return 0;
}

/*
 * Spill handler.
 *
 * Tries to spill a page table entry from the overflow area.
 * Note that this routine runs in real mode on a separate stack,
 * with interrupts disabled.
 */
int
pte_spill(vm_offset_t addr)
{
	int		idx, i;
	sr_t		sr;
	struct pte_ovfl	*po;
	pte_t		ps;
	pte_t		*pt;

	__asm ("mfsrin %0,%1" : "=r"(sr) : "r"(addr));
	idx = pteidx(sr, addr);
	for (po = potable[idx].lh_first; po; po = po->po_list.le_next) {
		if (ptematch(&po->po_pte, sr, addr, 0)) {
			/*
			 * We found an entry to be spilled into the real
			 * ptable.
			 */
			if (pte_insert(idx, &po->po_pte)) {
				LIST_REMOVE(po, po_list);
				pofree(po, 0);
				return 1;
			}
			/*
			 * Have to substitute some entry. Use the primary
			 * hash for this.
			 *
			 * Use low bits of timebase as random generator
			 */
			__asm ("mftb %0" : "=r"(i));
			pt = ptable + idx * 8 + (i & 7);
			pt->pte_hi &= ~PTE_VALID;
			ps = *pt;
			__asm __volatile ("sync");
			tlbie(addr);
			tlbsync();
			*pt = po->po_pte;
			__asm __volatile ("sync");
			pt->pte_hi |= PTE_VALID;
			po->po_pte = ps;
			if (ps.pte_hi & PTE_HID) {
				/*
				 * We took an entry that was on the alternate
				 * hash chain, so move it to its original
				 * chain.
				 */
				po->po_pte.pte_hi &= ~PTE_HID;
				LIST_REMOVE(po, po_list);
				LIST_INSERT_HEAD(potable + (idx ^ ptab_mask),
						 po, po_list);
			}
			return 1;
		}
	}

	return 0;
}

/*
 * This is called during powerpc_init, before the system is really initialized.
 */
void
pmap_bootstrap(u_int kernelstart, u_int kernelend)
{
	struct mem_region	*mp, *mp1;
	int			cnt, i;
	u_int			s, e, sz;

	/*
	 * Get memory.
	 */
	mem_regions(&mem, &avail);
	for (mp = mem; mp->size; mp++)
		Maxmem += btoc(mp->size);

	/*
	 * Count the number of available entries.
	 */
	for (cnt = 0, mp = avail; mp->size; mp++) {
		cnt++;
	}

	/*
	 * Page align all regions.
	 * Non-page aligned memory isn't very interesting to us.
	 * Also, sort the entries for ascending addresses.
	 */
	kernelstart &= ~PAGE_MASK;
	kernelend = (kernelend + PAGE_MASK) & ~PAGE_MASK;
	for (mp = avail; mp->size; mp++) {
		s = mp->start;
		e = mp->start + mp->size;
		/*
		 * Check whether this region holds all of the kernel.
		 */
		if (s < kernelstart && e > kernelend) {
			avail[cnt].start = kernelend;
			avail[cnt++].size = e - kernelend;
			e = kernelstart;
		}
		/*
		 * Check whether this region starts within the kernel.
		 */
		if (s >= kernelstart && s < kernelend) {
			if (e <= kernelend)
				goto empty;
			s = kernelend;
		}
		/*
		 * Now look whether this region ends within the kernel.
		 */
		if (e > kernelstart && e <= kernelend) {
			if (s >= kernelstart)
				goto empty;
			e = kernelstart;
		}
		/*
		 * Now page align the start and size of the region.
		 */
		s = round_page(s);
		e = trunc_page(e);
		if (e < s) {
			e = s;
		}
		sz = e - s;
		/*
		 * Check whether some memory is left here.
		 */
		if (sz == 0) {
		empty:
			bcopy(mp + 1, mp,
			      (cnt - (mp - avail)) * sizeof *mp);
			cnt--;
			mp--;
			continue;
		}

		/*
		 * Do an insertion sort.
		 */
		npgs += btoc(sz);

		for (mp1 = avail; mp1 < mp; mp1++) {
			if (s < mp1->start) {
				break;
			}
		}

		if (mp1 < mp) {
			bcopy(mp1, mp1 + 1, (char *)mp - (char *)mp1);
			mp1->start = s;
			mp1->size = sz;
		} else {
			mp->start = s;
			mp->size = sz;
		}
	}

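	/*
	 * Size the hashed page table: roughly one PTEG for every two pages
	 * of physical memory, with a floor of 1024 PTEGs (a 64KB table),
	 * rounded up to a power of two via the cntlzw trick below, since
	 * both the hardware and ptab_mask assume a power-of-two table size.
	 */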
#ifdef HTABENTS
	ptab_cnt = HTABENTS;
#else
	ptab_cnt = (Maxmem + 1) / 2;

	/* The minimum is 1024 PTEGs. */
	if (ptab_cnt < 1024) {
		ptab_cnt = 1024;
	}

	/* Round up to power of 2. */
	__asm ("cntlzw %0,%1" : "=r"(i) : "r"(ptab_cnt - 1));
	ptab_cnt = 1 << (32 - i);
#endif

	/*
	 * Find suitably aligned memory for HTAB.
	 */
	for (mp = avail; mp->size; mp++) {
		s = roundup(mp->start, HTABSIZE) - mp->start;

		if (mp->size < s + HTABSIZE) {
			continue;
		}

		ptable = (pte_t *)(mp->start + s);

		if (mp->size == s + HTABSIZE) {
			if (s)
				mp->size = s;
			else {
				bcopy(mp + 1, mp,
				      (cnt - (mp - avail)) * sizeof *mp);
				mp = avail;
			}
			break;
		}

		if (s != 0) {
			bcopy(mp, mp + 1,
			      (cnt - (mp - avail)) * sizeof *mp);
			mp++->size = s;
			cnt++;
		}

		mp->start += s + HTABSIZE;
		mp->size -= s + HTABSIZE;
		break;
	}

	if (!mp->size) {
		panic("not enough memory?");
	}

	npgs -= btoc(HTABSIZE);
	bzero((void *)ptable, HTABSIZE);
	ptab_mask = ptab_cnt - 1;

	/*
	 * We cannot do pmap_steal_memory here,
	 * since we don't run with translation enabled yet.
	 */
	s = sizeof(struct pte_ovtab) * ptab_cnt;
	sz = round_page(s);

	for (mp = avail; mp->size; mp++) {
		if (mp->size >= sz) {
			break;
		}
	}

	if (!mp->size) {
		panic("not enough memory?");
	}

	npgs -= btoc(sz);
	potable = (struct pte_ovtab *)mp->start;
	mp->size -= sz;
	mp->start += sz;

	if (mp->size <= 0) {
		bcopy(mp + 1, mp, (cnt - (mp - avail)) * sizeof *mp);
	}

	for (i = 0; i < ptab_cnt; i++) {
		LIST_INIT(potable + i);
	}

#ifndef MSGBUFADDR
	/*
	 * allow for msgbuf
	 */
	sz = round_page(MSGBUFSIZE);
	mp = NULL;

	for (mp1 = avail; mp1->size; mp1++) {
		if (mp1->size >= sz) {
			mp = mp1;
		}
	}

	if (mp == NULL) {
		panic("not enough memory?");
	}

	npgs -= btoc(sz);
	msgbuf_paddr = mp->start + mp->size - sz;
	mp->size -= sz;

	if (mp->size <= 0) {
		bcopy(mp + 1, mp, (cnt - (mp - avail)) * sizeof *mp);
	}
#endif

	/*
	 * Initialize kernel pmap and hardware.
	 */
	kernel_pmap = &kernel_pmap_store;

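	/*
	 * Set up DBAT1.  Reading the magic numbers (not spelled out in the
	 * original): batu = 0x80001ffe is BEPI 0x80000000 with BL = 0x7ff
	 * (a 256MB block) and Vs set; batl = 0x80000012 maps it 1:1 to
	 * physical 0x80000000 with the M bit and PP = read/write, i.e. a
	 * 256MB direct-mapped, coherent window at 0x80000000.
	 */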
	{
		int	batu, batl;

		batu = 0x80001ffe;
		batl = 0x80000012;

		__asm ("mtdbatu 1,%0; mtdbatl 1,%1" :: "r" (batu), "r" (batl));
	}

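	/*
	 * Mark the block of segment registers containing KERNEL_SEGMENT as
	 * allocated in usedsr, so that pmap_pinit() never hands the
	 * kernel's VSIDs out to a user pmap.
	 */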
#if NPMAPS >= KERNEL_SEGMENT / 16
	usedsr[KERNEL_SEGMENT / 16 / (sizeof usedsr[0] * 8)]
		|= 1 << ((KERNEL_SEGMENT / 16) % (sizeof usedsr[0] * 8));
#endif

#if 0 /* XXX */
	for (i = 0; i < 16; i++) {
		kernel_pmap->pm_sr[i] = EMPTY_SEGMENT;
		__asm __volatile ("mtsrin %0,%1"
			      :: "r"(EMPTY_SEGMENT), "r"(i << ADDR_SR_SHFT));
	}
#endif

	for (i = 0; i < 16; i++) {
		int	j;

		__asm __volatile ("mfsrin %0,%1"
			: "=r" (j)
			: "r" (i << ADDR_SR_SHFT));

		kernel_pmap->pm_sr[i] = j;
	}

	kernel_pmap->pm_sr[KERNEL_SR] = KERNEL_SEGMENT;
	__asm __volatile ("mtsr %0,%1"
		      :: "n"(KERNEL_SR), "r"(KERNEL_SEGMENT));

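	/*
	 * Point the MMU at the hash table through SDR1: the upper bits hold
	 * the physical base of the table (which is why it was allocated on
	 * an HTABSIZE boundary above) and the low-order bits hold the hash
	 * table mask, ptab_mask >> 10.
	 */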
	__asm __volatile ("sync; mtsdr1 %0; isync"
		      :: "r"((u_int)ptable | (ptab_mask >> 10)));

	tlbia();

	nextavail = avail->start;
	avail_start = avail->start;
	for (mp = avail, i = 0; mp->size; mp++) {
		avail_end = mp->start + mp->size;
		phys_avail[i++] = mp->start;
		phys_avail[i++] = mp->start + mp->size;
	}

	virtual_avail = VM_MIN_KERNEL_ADDRESS;
	virtual_end = VM_MAX_KERNEL_ADDRESS;
}

/*
 * Initialize anything else for pmap handling.
 * Called during vm_init().
 */
void
pmap_init(vm_offset_t phys_start, vm_offset_t phys_end)
{
	int	initial_pvs;

	/*
	 * init the pv free list
	 */
	initial_pvs = vm_page_array_size;
	if (initial_pvs < MINPV) {
		initial_pvs = MINPV;
	}
	pvzone = &pvzone_store;
	pvinit = (struct pv_entry *) kmem_alloc(kernel_map,
	    initial_pvs * sizeof(struct pv_entry));
	zbootinit(pvzone, "PV ENTRY", sizeof(struct pv_entry), pvinit,
	    vm_page_array_size);

	pmap_initialized = TRUE;
}

/*
 * Initialize a preallocated and zeroed pmap structure.
 */
void
pmap_pinit(struct pmap *pm)
{
	int	i, j;

	/*
	 * Allocate some segment registers for this pmap.
	 */
	pm->pm_refs = 1;
	for (i = 0; i < sizeof usedsr / sizeof usedsr[0]; i++) {
		if (usedsr[i] != 0xffffffff) {
			j = ffs(~usedsr[i]) - 1;
			usedsr[i] |= 1 << j;
			pm->pm_sr[0] = (i * sizeof usedsr[0] * 8 + j) * 16;
			for (i = 1; i < 16; i++) {
				pm->pm_sr[i] = pm->pm_sr[i - 1] + 1;
			}
			return;
		}
	}
	panic("out of segments");
}

void
pmap_pinit2(pmap_t pmap)
{

	/*
	 * Nothing to be done.
	 */
	return;
}

/*
 * Add a reference to the given pmap.
 */
void
pmap_reference(struct pmap *pm)
{

	pm->pm_refs++;
}

/*
 * Retire the given pmap from service.
 * Should only be called if the map contains no valid mappings.
 */
void
pmap_destroy(struct pmap *pm)
{

	if (--pm->pm_refs == 0) {
		pmap_release(pm);
		free((caddr_t)pm, M_VMPGDATA);
	}
}

/*
 * Release any resources held by the given physical map.
 * Called when a pmap initialized by pmap_pinit is being released.
 */
void
pmap_release(struct pmap *pm)
{
	int	i, j;

	if (!pm->pm_sr[0]) {
		panic("pmap_release");
	}
	i = pm->pm_sr[0] / 16;
	j = i % (sizeof usedsr[0] * 8);
	i /= sizeof usedsr[0] * 8;
	usedsr[i] &= ~(1 << j);
}

/*
 * Copy the range specified by src_addr/len
 * from the source map to the range dst_addr/len
 * in the destination map.
 *
 * This routine is only advisory and need not do anything.
 */
void
pmap_copy(struct pmap *dst_pmap, struct pmap *src_pmap, vm_offset_t dst_addr,
    vm_size_t len, vm_offset_t src_addr)
{

	return;
}

/*
 * Garbage collects the physical map system for
 * pages which are no longer used.
 * Success need not be guaranteed -- that is, there
 * may well be pages which are not referenced, but
 * others may be collected.
 * Called by the pageout daemon when pages are scarce.
 */
void
pmap_collect(void)
{

	return;
}

/*
 * Fill the given physical page with zeroes.
 */
void
pmap_zero_page(vm_offset_t pa)
{
#if 0
	bzero((caddr_t)pa, PAGE_SIZE);
#else
	int	i;

	for (i = PAGE_SIZE/CACHELINESIZE; i > 0; i--) {
		__asm __volatile ("dcbz 0,%0" :: "r"(pa));
		pa += CACHELINESIZE;
	}
#endif
}

void
pmap_zero_page_area(vm_offset_t pa, int off, int size)
{

	bzero((caddr_t)pa + off, size);
}

/*
 * Copy the given physical source page to its destination.
 */
void
pmap_copy_page(vm_offset_t src, vm_offset_t dst)
{

	bcopy((caddr_t)src, (caddr_t)dst, PAGE_SIZE);
}

static struct pv_entry *
pmap_alloc_pv()
{
	pv_entry_count++;

	if (pv_entry_high_water &&
	    (pv_entry_count > pv_entry_high_water) &&
	    (pmap_pagedaemon_waken == 0)) {
		pmap_pagedaemon_waken = 1;
		wakeup(&vm_pages_needed);
	}

	return zalloc(pvzone);
}

static void
pmap_free_pv(struct pv_entry *pv)
{

	pv_entry_count--;
	zfree(pvzone, pv);
}

/*
 * We really hope that we don't need overflow entries
 * before the VM system is initialized!
 *
 * XXX: Should really be switched over to the zone allocator.
 */
static struct pte_ovfl *
poalloc()
{
	struct po_page	*pop;
	struct pte_ovfl	*po;
	vm_page_t	mem;
	int		i;

	if (!pmap_initialized) {
		panic("poalloc");
	}

	if (po_nfree == 0) {
		/*
		 * Since we cannot use maps for potable allocation,
		 * we have to steal some memory from the VM system.			XXX
		 */
		mem = vm_page_alloc(NULL, 0, VM_ALLOC_SYSTEM);
		po_pcnt++;
		pop = (struct po_page *)VM_PAGE_TO_PHYS(mem);
		pop->pop_pgi.pgi_page = mem;
		LIST_INIT(&pop->pop_pgi.pgi_freelist);
		for (i = NPOPPG - 1, po = pop->pop_po + 1; --i >= 0; po++) {
			LIST_INSERT_HEAD(&pop->pop_pgi.pgi_freelist, po,
			    po_list);
		}
		po_nfree += pop->pop_pgi.pgi_nfree = NPOPPG - 1;
		LIST_INSERT_HEAD(&po_page_freelist, pop, pop_pgi.pgi_list);
		po = pop->pop_po;
	} else {
		po_nfree--;
		pop = po_page_freelist.lh_first;
		if (--pop->pop_pgi.pgi_nfree <= 0) {
			LIST_REMOVE(pop, pop_pgi.pgi_list);
		}
		po = pop->pop_pgi.pgi_freelist.lh_first;
		LIST_REMOVE(po, po_list);
	}

	return po;
}

static void
pofree(struct pte_ovfl *po, int freepage)
{
	struct po_page	*pop;

	pop = (struct po_page *)trunc_page((vm_offset_t)po);
	switch (++pop->pop_pgi.pgi_nfree) {
	case NPOPPG:
		if (!freepage) {
			break;
		}
		po_nfree -= NPOPPG - 1;
		po_pcnt--;
		LIST_REMOVE(pop, pop_pgi.pgi_list);
		vm_page_free(pop->pop_pgi.pgi_page);
		return;
	case 1:
		LIST_INSERT_HEAD(&po_page_freelist, pop, pop_pgi.pgi_list);
	default:
		break;
	}
	LIST_INSERT_HEAD(&pop->pop_pgi.pgi_freelist, po, po_list);
	po_nfree++;
}

/*
 * This returns whether this is the first mapping of a page.
 */
static int
pmap_enter_pv(int pteidx, vm_offset_t va, vm_offset_t pa)
{
	struct pv_entry	*pv, *npv;
	int		s, first;

	if (!pmap_initialized) {
		return 0;
	}

	s = splimp();

	pv = pa_to_pv(pa);
	first = pv->pv_idx;
	if (pv->pv_idx == -1) {
		/*
		 * No entries yet, use header as the first entry.
		 */
		pv->pv_va = va;
		pv->pv_idx = pteidx;
		pv->pv_next = NULL;
	} else {
		/*
		 * There is at least one other VA mapping this page.
		 * Place this entry after the header.
		 */
		npv = pmap_alloc_pv();
		npv->pv_va = va;
		npv->pv_idx = pteidx;
		npv->pv_next = pv->pv_next;
		pv->pv_next = npv;
	}
	splx(s);
	return first;
}

static void
pmap_remove_pv(int pteidx, vm_offset_t va, vm_offset_t pa, struct pte *pte)
{
	struct pv_entry	*pv, *npv;
	char		*attr;

	/*
	 * First transfer reference/change bits to cache.
	 */
	attr = pa_to_attr(pa);
	if (attr == NULL) {
		return;
	}
	*attr |= (pte->pte_lo & (PTE_REF | PTE_CHG)) >> ATTRSHFT;

	/*
	 * Remove from the PV table.
	 */
	pv = pa_to_pv(pa);

	/*
	 * If it is the first entry on the list, it is actually
	 * in the header and we must copy the following entry up
	 * to the header.  Otherwise we must search the list for
	 * the entry.  In either case we free the now unused entry.
	 */
	if (pteidx == pv->pv_idx && va == pv->pv_va) {
		npv = pv->pv_next;
		if (npv) {
			*pv = *npv;
			pmap_free_pv(npv);
		} else {
			pv->pv_idx = -1;
		}
	} else {
		for (; (npv = pv->pv_next); pv = npv) {
			if (pteidx == npv->pv_idx && va == npv->pv_va) {
				break;
			}
		}
		if (npv) {
			pv->pv_next = npv->pv_next;
			pmap_free_pv(npv);
		}
#ifdef	DIAGNOSTIC
		else {
			panic("pmap_remove_pv: not on list\n");
		}
#endif
	}
}

/*
 * Insert physical page at pa into the given pmap at virtual address va.
 */
void
pmap_enter(pmap_t pm, vm_offset_t va, vm_page_t pg, vm_prot_t prot,
    boolean_t wired)
{
	sr_t			sr;
	int			idx, s;
	pte_t			pte;
	struct pte_ovfl		*po;
	struct mem_region	*mp;
	vm_offset_t		pa;

	pa = VM_PAGE_TO_PHYS(pg) & ~PAGE_MASK;

	/*
	 * Have to remove any existing mapping first.
	 */
	pmap_remove(pm, va, va + PAGE_SIZE);

	/*
	 * Compute the HTAB index.
	 */
	idx = pteidx(sr = ptesr(pm->pm_sr, va), va);
	/*
	 * Construct the PTE.
	 *
	 * Note: Don't set the valid bit for correct operation of tlb update.
	 */
	pte.pte_hi = ((sr & SR_VSID) << PTE_VSID_SHFT)
		| ((va & ADDR_PIDX) >> ADDR_API_SHFT);
	pte.pte_lo = (pa & PTE_RPGN) | PTE_M | PTE_I | PTE_G;

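	/*
	 * The PTE starts out caching-inhibited and guarded (PTE_I | PTE_G),
	 * the safe setting for device space; if the physical address turns
	 * out to lie within a known RAM region below, those bits are
	 * cleared so the mapping becomes cacheable.
	 */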
	for (mp = mem; mp->size; mp++) {
		if (pa >= mp->start && pa < mp->start + mp->size) {
			pte.pte_lo &= ~(PTE_I | PTE_G);
			break;
		}
	}
	if (prot & VM_PROT_WRITE) {
		pte.pte_lo |= PTE_RW;
	} else {
		pte.pte_lo |= PTE_RO;
	}

	/*
	 * Now record mapping for later back-translation.
	 */
	if (pmap_initialized && (pg->flags & PG_FICTITIOUS) == 0) {
		if (pmap_enter_pv(idx, va, pa)) {
			/*
			 * Flush the real memory from the cache.
			 */
			__syncicache((void *)pa, PAGE_SIZE);
		}
	}

	s = splimp();
	pm->pm_stats.resident_count++;
	/*
	 * Try to insert directly into HTAB.
	 */
	if (pte_insert(idx, &pte)) {
		splx(s);
		return;
	}

	/*
	 * Have to allocate overflow entry.
	 *
	 * Note that we must use real addresses for these.
	 */
	po = poalloc();
	po->po_pte = pte;
	LIST_INSERT_HEAD(potable + idx, po, po_list);
	splx(s);
}

void
pmap_kenter(vm_offset_t va, vm_offset_t pa)
{
	struct vm_page	pg;

	pg.phys_addr = pa;
	pmap_enter(kernel_pmap, va, &pg, VM_PROT_READ|VM_PROT_WRITE, TRUE);
}

void
pmap_kremove(vm_offset_t va)
{
	pmap_remove(kernel_pmap, va, va + PAGE_SIZE);
}

/*
 * Remove the given range of mapping entries.
 */
void
pmap_remove(struct pmap *pm, vm_offset_t va, vm_offset_t endva)
{
	int		idx, i, s;
	sr_t		sr;
	pte_t		*ptp;
	struct pte_ovfl	*po, *npo;

	s = splimp();
	while (va < endva) {
		idx = pteidx(sr = ptesr(pm->pm_sr, va), va);
		for (ptp = ptable + idx * 8, i = 8; --i >= 0; ptp++) {
			if (ptematch(ptp, sr, va, PTE_VALID)) {
				pmap_remove_pv(idx, va, ptp->pte_lo, ptp);
				ptp->pte_hi &= ~PTE_VALID;
				__asm __volatile ("sync");
				tlbie(va);
				tlbsync();
				pm->pm_stats.resident_count--;
			}
		}
		for (ptp = ptable + (idx ^ ptab_mask) * 8, i = 8; --i >= 0;
		    ptp++) {
			if (ptematch(ptp, sr, va, PTE_VALID | PTE_HID)) {
				pmap_remove_pv(idx, va, ptp->pte_lo, ptp);
				ptp->pte_hi &= ~PTE_VALID;
				__asm __volatile ("sync");
				tlbie(va);
				tlbsync();
				pm->pm_stats.resident_count--;
			}
		}
		for (po = potable[idx].lh_first; po; po = npo) {
			npo = po->po_list.le_next;
			if (ptematch(&po->po_pte, sr, va, 0)) {
				pmap_remove_pv(idx, va, po->po_pte.pte_lo,
					       &po->po_pte);
				LIST_REMOVE(po, po_list);
				pofree(po, 1);
				pm->pm_stats.resident_count--;
			}
		}
		va += PAGE_SIZE;
	}
	splx(s);
}

static pte_t *
pte_find(struct pmap *pm, vm_offset_t va)
{
	int		idx, i;
	sr_t		sr;
	pte_t		*ptp;
	struct pte_ovfl	*po;

	idx = pteidx(sr = ptesr(pm->pm_sr, va), va);
	for (ptp = ptable + idx * 8, i = 8; --i >= 0; ptp++) {
		if (ptematch(ptp, sr, va, PTE_VALID)) {
			return ptp;
		}
	}
	for (ptp = ptable + (idx ^ ptab_mask) * 8, i = 8; --i >= 0; ptp++) {
		if (ptematch(ptp, sr, va, PTE_VALID | PTE_HID)) {
			return ptp;
		}
	}
	for (po = potable[idx].lh_first; po; po = po->po_list.le_next) {
		if (ptematch(&po->po_pte, sr, va, 0)) {
			return &po->po_pte;
		}
	}
	return 0;
}

/*
 * Get the physical page address for the given pmap/virtual address.
 */
vm_offset_t
pmap_extract(pmap_t pm, vm_offset_t va)
{
	pte_t	*ptp;
	int	s;

	s = splimp();

	if (!(ptp = pte_find(pm, va))) {
		splx(s);
		return (0);
	}
	splx(s);
	return ((ptp->pte_lo & PTE_RPGN) | (va & ADDR_POFF));
}

/*
 * Lower the protection on the specified range of this pmap.
 *
 * There are only two cases: either the protection is going to 0,
 * or it is going to read-only.
 */
void
pmap_protect(struct pmap *pm, vm_offset_t sva, vm_offset_t eva, vm_prot_t prot)
{
	pte_t	*ptp;
	int	valid, s;

	if (prot & VM_PROT_READ) {
		s = splimp();
		while (sva < eva) {
			ptp = pte_find(pm, sva);
			if (ptp) {
				valid = ptp->pte_hi & PTE_VALID;
				ptp->pte_hi &= ~PTE_VALID;
				__asm __volatile ("sync");
				tlbie(sva);
				tlbsync();
				ptp->pte_lo &= ~PTE_PP;
				ptp->pte_lo |= PTE_RO;
				__asm __volatile ("sync");
				ptp->pte_hi |= valid;
			}
			sva += PAGE_SIZE;
		}
		splx(s);
		return;
	}
	pmap_remove(pm, sva, eva);
}

boolean_t
ptemodify(vm_page_t pg, u_int mask, u_int val)
{
	vm_offset_t	pa;
	struct pv_entry	*pv;
	pte_t		*ptp;
	struct pte_ovfl	*po;
	int		i, s;
	char		*attr;
	int		rv;

	pa = VM_PAGE_TO_PHYS(pg);

	/*
	 * First modify bits in cache.
	 */
	attr = pa_to_attr(pa);
	if (attr == NULL) {
		return FALSE;
	}

	*attr &= ~mask >> ATTRSHFT;
	*attr |= val >> ATTRSHFT;

	pv = pa_to_pv(pa);
	if (pv->pv_idx < 0) {
		return FALSE;
	}

	rv = FALSE;
	s = splimp();
	for (; pv; pv = pv->pv_next) {
		for (ptp = ptable + pv->pv_idx * 8, i = 8; --i >= 0; ptp++) {
			if ((ptp->pte_hi & PTE_VALID)
			    && (ptp->pte_lo & PTE_RPGN) == pa) {
				ptp->pte_hi &= ~PTE_VALID;
				__asm __volatile ("sync");
				tlbie(pv->pv_va);
				tlbsync();
				rv |= ptp->pte_lo & mask;
				ptp->pte_lo &= ~mask;
				ptp->pte_lo |= val;
				__asm __volatile ("sync");
				ptp->pte_hi |= PTE_VALID;
			}
		}
		for (ptp = ptable + (pv->pv_idx ^ ptab_mask) * 8, i = 8;
		    --i >= 0; ptp++) {
			if ((ptp->pte_hi & PTE_VALID)
			    && (ptp->pte_lo & PTE_RPGN) == pa) {
				ptp->pte_hi &= ~PTE_VALID;
				__asm __volatile ("sync");
				tlbie(pv->pv_va);
				tlbsync();
				rv |= ptp->pte_lo & mask;
				ptp->pte_lo &= ~mask;
				ptp->pte_lo |= val;
				__asm __volatile ("sync");
				ptp->pte_hi |= PTE_VALID;
			}
		}
		for (po = potable[pv->pv_idx].lh_first; po;
		    po = po->po_list.le_next) {
			if ((po->po_pte.pte_lo & PTE_RPGN) == pa) {
				rv |= po->po_pte.pte_lo & mask;
				po->po_pte.pte_lo &= ~mask;
				po->po_pte.pte_lo |= val;
			}
		}
	}
	splx(s);
	return rv != 0;
}

int
ptebits(vm_page_t pg, int bit)
{
	struct pv_entry	*pv;
	pte_t		*ptp;
	struct pte_ovfl	*po;
	int		i, s, bits;
	char		*attr;
	vm_offset_t	pa;

	bits = 0;
	pa = VM_PAGE_TO_PHYS(pg);

	/*
	 * First try the cache.
	 */
	attr = pa_to_attr(pa);
	if (attr == NULL) {
		return 0;
	}
	bits |= (*attr << ATTRSHFT) & bit;
	if (bits == bit) {
		return bits;
	}

	pv = pa_to_pv(pa);
	if (pv->pv_idx < 0) {
		return 0;
	}

	s = splimp();
	for (; pv; pv = pv->pv_next) {
		for (ptp = ptable + pv->pv_idx * 8, i = 8; --i >= 0; ptp++) {
			if ((ptp->pte_hi & PTE_VALID)
			    && (ptp->pte_lo & PTE_RPGN) == pa) {
				bits |= ptp->pte_lo & bit;
				if (bits == bit) {
					splx(s);
					return bits;
				}
			}
		}
		for (ptp = ptable + (pv->pv_idx ^ ptab_mask) * 8, i = 8;
		    --i >= 0; ptp++) {
			if ((ptp->pte_hi & PTE_VALID)
			    && (ptp->pte_lo & PTE_RPGN) == pa) {
				bits |= ptp->pte_lo & bit;
				if (bits == bit) {
					splx(s);
					return bits;
				}
			}
		}
		for (po = potable[pv->pv_idx].lh_first; po;
		    po = po->po_list.le_next) {
			if ((po->po_pte.pte_lo & PTE_RPGN) == pa) {
				bits |= po->po_pte.pte_lo & bit;
				if (bits == bit) {
					splx(s);
					return bits;
				}
			}
		}
	}
	splx(s);
	return bits;
}

/*
 * Lower the protection on the specified physical page.
 *
 * There are only two cases: either the protection is going to 0,
 * or it is going to read-only.
 */
void
pmap_page_protect(vm_page_t m, vm_prot_t prot)
{
	vm_offset_t	pa;
	vm_offset_t	va;
	pte_t		*ptp;
	struct pte_ovfl	*po, *npo;
	int		i, s, idx;
	struct pv_entry	*pv;

	pa = VM_PAGE_TO_PHYS(m);

	pa &= ~ADDR_POFF;
	if (prot & VM_PROT_READ) {
		ptemodify(m, PTE_PP, PTE_RO);
		return;
	}

	pv = pa_to_pv(pa);
	if (pv == NULL) {
		return;
	}

	s = splimp();
	while (pv->pv_idx >= 0) {
		idx = pv->pv_idx;
		va = pv->pv_va;
		for (ptp = ptable + idx * 8, i = 8; --i >= 0; ptp++) {
			if ((ptp->pte_hi & PTE_VALID)
			    && (ptp->pte_lo & PTE_RPGN) == pa) {
				pmap_remove_pv(idx, va, pa, ptp);
				ptp->pte_hi &= ~PTE_VALID;
				__asm __volatile ("sync");
				tlbie(va);
				tlbsync();
				goto next;
			}
		}
		for (ptp = ptable + (idx ^ ptab_mask) * 8, i = 8; --i >= 0;
		    ptp++) {
			if ((ptp->pte_hi & PTE_VALID)
			    && (ptp->pte_lo & PTE_RPGN) == pa) {
				pmap_remove_pv(idx, va, pa, ptp);
				ptp->pte_hi &= ~PTE_VALID;
				__asm __volatile ("sync");
				tlbie(va);
				tlbsync();
				goto next;
			}
		}
		for (po = potable[idx].lh_first; po; po = npo) {
			npo = po->po_list.le_next;
			if ((po->po_pte.pte_lo & PTE_RPGN) == pa) {
				pmap_remove_pv(idx, va, pa, &po->po_pte);
				LIST_REMOVE(po, po_list);
				pofree(po, 1);
				goto next;
			}
		}
next:
		;
	}
	splx(s);
}

/*
 * Activate the address space for the specified process.  If the process
 * is the current process, load the new MMU context.
 */
void
pmap_activate(struct proc *p)
{
	struct pcb	*pcb;
	pmap_t		pmap;
	pmap_t		rpm;
	int		psl, i, ksr, seg;

	pcb = &p->p_addr->u_pcb;
	pmap = p->p_vmspace->vm_map.pmap;

	/*
	 * XXX Normally performed in cpu_fork().
	 */
	if (pcb->pcb_pm != pmap) {
		pcb->pcb_pm = pmap;
		(vm_offset_t) pcb->pcb_pmreal = pmap_extract(kernel_pmap,
		    (vm_offset_t)pcb->pcb_pm);
	}

	if (p == curproc) {
		/* Disable interrupts while switching. */
		__asm __volatile("mfmsr %0" : "=r"(psl) :);
		psl &= ~PSL_EE;
		__asm __volatile("mtmsr %0" :: "r"(psl));

#if 0 /* XXX */
		/* Store pointer to new current pmap. */
		curpm = pcb->pcb_pmreal;
#endif

		/* Save kernel SR. */
		__asm __volatile("mfsr %0,14" : "=r"(ksr) :);

		/*
		 * Set new segment registers.  We use the pmap's real
		 * address to avoid accessibility problems.
		 */
		rpm = pcb->pcb_pmreal;
		for (i = 0; i < 16; i++) {
			seg = rpm->pm_sr[i];
			__asm __volatile("mtsrin %0,%1"
			    :: "r"(seg), "r"(i << ADDR_SR_SHFT));
		}

		/* Restore kernel SR. */
		__asm __volatile("mtsr 14,%0" :: "r"(ksr));

		/* Interrupts are OK again. */
		psl |= PSL_EE;
		__asm __volatile("mtmsr %0" :: "r"(psl));
	}
}

/*
 * Add a list of wired pages to the kva.  This routine is only used for
 * temporary kernel mappings that do not need to have page modification
 * or references recorded.  Note that old mappings are simply written
 * over.  The page *must* be wired.
 */
void
pmap_qenter(vm_offset_t va, vm_page_t *m, int count)
{
	int	i;

	for (i = 0; i < count; i++) {
		vm_offset_t tva = va + i * PAGE_SIZE;
		pmap_kenter(tva, VM_PAGE_TO_PHYS(m[i]));
	}
}

/*
 * this routine jerks page mappings from the
 * kernel -- it is meant only for temporary mappings.
 */
void
pmap_qremove(vm_offset_t va, int count)
{
	vm_offset_t	end_va;

	end_va = va + count*PAGE_SIZE;

	while (va < end_va) {
		unsigned *pte;

		pte = (unsigned *)vtopte(va);
		*pte = 0;
		tlbie(va);
		va += PAGE_SIZE;
	}
}

/*
 * 	pmap_ts_referenced:
 *
 *	Return the count of reference bits for a page, clearing all of them.
 */
int
pmap_ts_referenced(vm_page_t m)
{

	/* XXX: coming soon... */
	return (0);
}

/*
 * this routine returns true if a physical page resides
 * in the given pmap.
 */
boolean_t
pmap_page_exists(pmap_t pmap, vm_page_t m)
{
#if 0 /* XXX: This must go! */
	register pv_entry_t pv;
	int s;

	if (!pmap_initialized || (m->flags & PG_FICTITIOUS))
		return FALSE;

	s = splvm();

	/*
	 * Not found, check current mappings returning immediately if found.
	 */
	for (pv = pv_table; pv; pv = pv->pv_next) {
		if (pv->pv_pmap == pmap) {
			splx(s);
			return TRUE;
		}
	}
	splx(s);
#endif
	return (FALSE);
}

/*
 *	Used to map a range of physical addresses into kernel
 *	virtual address space.
 *
 *	For now, VM is already on, we only need to map the
 *	specified memory.
 */
vm_offset_t
pmap_map(vm_offset_t *virt, vm_offset_t start, vm_offset_t end, int prot)
{
	vm_offset_t	sva, va;

	sva = *virt;
	va = sva;

	while (start < end) {
		pmap_kenter(va, start);
		va += PAGE_SIZE;
		start += PAGE_SIZE;
	}

	*virt = va;
	return (sva);
}

vm_offset_t
pmap_addr_hint(vm_object_t obj, vm_offset_t addr, vm_size_t size)
{

	return (addr);
}

int
pmap_mincore(pmap_t pmap, vm_offset_t addr)
{

	/* XXX: coming soon... */
	return (0);
}

void
pmap_object_init_pt(pmap_t pmap, vm_offset_t addr, vm_object_t object,
    vm_pindex_t pindex, vm_size_t size, int limit)
{

	/* XXX: coming soon... */
	return;
}

void
pmap_growkernel(vm_offset_t addr)
{

	/* XXX: coming soon... */
	return;
}

/*
 * Initialize the address space (zone) for the pv_entries.  Set a
 * high water mark so that the system can recover from excessive
 * numbers of pv entries.
 */
void
pmap_init2()
{
	pv_entry_max = PMAP_SHPGPERPROC * maxproc + vm_page_array_size;
	pv_entry_high_water = 9 * (pv_entry_max / 10);
	zinitna(pvzone, &pvzone_obj, NULL, 0, pv_entry_max, ZONE_INTERRUPT, 1);
}

void
pmap_swapin_proc(struct proc *p)
{

	/* XXX: coming soon... */
	return;
}

void
pmap_swapout_proc(struct proc *p)
{

	/* XXX: coming soon... */
	return;
}

void
pmap_new_proc(struct proc *p)
{

	/* XXX: coming soon... */
	return;
}

void
pmap_pageable(pmap_t pmap, vm_offset_t sva, vm_offset_t eva, boolean_t pageable)
{

	return;
}

void
pmap_change_wiring(pmap_t pmap, vm_offset_t va, boolean_t wired)
{

	/* XXX: coming soon... */
	return;
}

void
pmap_prefault(pmap_t pmap, vm_offset_t addra, vm_map_entry_t entry)
{

	/* XXX: coming soon... */
	return;
}

void
pmap_remove_pages(pmap_t pmap, vm_offset_t sva, vm_offset_t eva)
{

	/* XXX: coming soon... */
	return;
}

void
pmap_pinit0(pmap_t pmap)
{

	/* XXX: coming soon... */
	return;
}

void
pmap_dispose_proc(struct proc *p)
{

	/* XXX: coming soon... */
	return;
}

vm_offset_t
pmap_steal_memory(vm_size_t size)
{
	vm_size_t bank_size;
	vm_offset_t pa;

	size = round_page(size);

	bank_size = phys_avail[1] - phys_avail[0];
	while (size > bank_size) {
		int i;
		for (i = 0; phys_avail[i+2]; i+= 2) {
			phys_avail[i] = phys_avail[i+2];
			phys_avail[i+1] = phys_avail[i+3];
		}
		phys_avail[i] = 0;
		phys_avail[i+1] = 0;
		if (!phys_avail[0])
			panic("pmap_steal_memory: out of memory");
		bank_size = phys_avail[1] - phys_avail[0];
	}

	pa = phys_avail[0];
	phys_avail[0] += size;

	bzero((caddr_t) pa, size);
	return pa;
}