/*
 * Copyright (C) 1995, 1996 Wolfgang Solfrank.
 * Copyright (C) 1995, 1996 TooLs GmbH.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright

 * [... 42 unchanged lines hidden ...]

 * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
 * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
 * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
 * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#ifndef lint
static const char rcsid[] =
  "$FreeBSD: head/sys/powerpc/aim/mmu_oea.c 85201 2001-10-19 22:45:46Z mp $";
#endif /* not lint */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/proc.h>
#include <sys/malloc.h>
#include <sys/msgbuf.h>
#include <sys/vmmeter.h>
#include <sys/mman.h>
#include <sys/queue.h>
#include <sys/lock.h>
#include <sys/mutex.h>

#include <vm/vm.h>
#include <vm/vm_param.h>
#include <vm/vm_kern.h>
#include <vm/vm_page.h>
#include <vm/vm_map.h>
#include <vm/vm_object.h>
#include <vm/vm_extern.h>
#include <vm/vm_pageout.h>
#include <vm/vm_pager.h>
#include <vm/vm_zone.h>

#include <sys/user.h>

#include <machine/bat.h>
#include <machine/pcb.h>
#include <machine/powerpc.h>
#include <machine/pte.h>

pte_t	*ptable;
int	ptab_cnt;
u_int	ptab_mask;
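/* Each PTEG holds 8 PTEs of 8 bytes each, hence 64 bytes per group. */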
#define	HTABSIZE	(ptab_cnt * 64)

#define	MINPV	2048

struct pte_ovfl {
	LIST_ENTRY(pte_ovfl) po_list;	/* Linked list of overflow entries */
	struct pte po_pte;		/* PTE for this mapping */
};

LIST_HEAD(pte_ovtab, pte_ovfl) *potable; /* Overflow entries for ptable */

static struct pmap kernel_pmap_store;
pmap_t kernel_pmap;

static int npgs;
static u_int nextavail;

#ifndef MSGBUFADDR
extern vm_offset_t msgbuf_paddr;
#endif

static struct mem_region *mem, *avail;

vm_offset_t avail_start;
vm_offset_t avail_end;
vm_offset_t virtual_avail;
vm_offset_t virtual_end;

vm_offset_t kernel_vm_end;

static int pmap_pagedaemon_waken = 0;

extern unsigned int Maxmem;

#define	ATTRSHFT	4

struct pv_entry *pv_table;

static vm_zone_t pvzone;
static struct vm_zone pvzone_store;
static struct vm_object pvzone_obj;
static int pv_entry_count = 0, pv_entry_max = 0, pv_entry_high_water = 0;
static struct pv_entry *pvinit;

#if !defined(PMAP_SHPGPERPROC)
#define	PMAP_SHPGPERPROC 200
#endif

struct pv_page;
struct pv_page_info {
	LIST_ENTRY(pv_page) pgi_list;
	struct pv_entry *pgi_freelist;
	int pgi_nfree;
};
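/* Number of pv entries that fit in one page after the header. */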
#define	NPVPPG	((PAGE_SIZE - sizeof(struct pv_page_info)) / sizeof(struct pv_entry))
struct pv_page {
	struct pv_page_info pvp_pgi;
	struct pv_entry pvp_pv[NPVPPG];
};
LIST_HEAD(pv_page_list, pv_page) pv_page_freelist;
int pv_nfree;
int pv_pcnt;
static struct pv_entry *pmap_alloc_pv(void);
static void pmap_free_pv(struct pv_entry *);

struct po_page;
struct po_page_info {
	LIST_ENTRY(po_page) pgi_list;
	vm_page_t pgi_page;
	LIST_HEAD(po_freelist, pte_ovfl) pgi_freelist;
	int pgi_nfree;
};
#define	NPOPPG	((PAGE_SIZE - sizeof(struct po_page_info)) / sizeof(struct pte_ovfl))
struct po_page {
	struct po_page_info pop_pgi;
	struct pte_ovfl pop_po[NPOPPG];
};
LIST_HEAD(po_page_list, po_page) po_page_freelist;
int po_nfree;
int po_pcnt;
static struct pte_ovfl *poalloc(void);
static void pofree(struct pte_ovfl *, int);

static u_int usedsr[NPMAPS / sizeof(u_int) / 8];

static int pmap_initialized;

int pte_spill(vm_offset_t);

/*
 * These small routines may have to be replaced,
 * if/when we support processors other than the 604.
 */
static __inline void
tlbie(vm_offset_t ea)
{

	__asm __volatile ("tlbie %0" :: "r"(ea));
}

static __inline void
tlbsync(void)
{

	__asm __volatile ("sync; tlbsync; sync");
}

static __inline void
tlbia(void)
{
	vm_offset_t i;

	__asm __volatile ("sync");
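	/*
	 * Stepping a page at a time through 256KB of effective address
	 * touches every TLB congruence class (64 sets on the 604-class
	 * MMUs this code targets), so the loop below flushes the
	 * entire TLB.
	 */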
	for (i = 0; i < (vm_offset_t)0x00040000; i += 0x00001000) {
		tlbie(i);
	}
	tlbsync();
}

static __inline int
ptesr(sr_t *sr, vm_offset_t addr)
{

	return sr[(u_int)addr >> ADDR_SR_SHFT];
}

static __inline int
pteidx(sr_t sr, vm_offset_t addr)
{
	int hash;

	hash = (sr & SR_VSID) ^ (((u_int)addr & ADDR_PIDX) >> ADDR_PIDX_SHFT);
	return hash & ptab_mask;
}
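/*
 * Example (values illustrative): with (sr & SR_VSID) == 0x5 and
 * addr == 0x30002000, the page index is (0x30002000 & ADDR_PIDX) >>
 * ADDR_PIDX_SHFT == 0x2, so the primary PTEG index is
 * (0x5 ^ 0x2) & ptab_mask == 0x7 & ptab_mask.
 */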

static __inline int
ptematch(pte_t *ptp, sr_t sr, vm_offset_t va, int which)
{

	return ptp->pte_hi == (((sr & SR_VSID) << PTE_VSID_SHFT) |
	    (((u_int)va >> ADDR_API_SHFT) & PTE_API) | which);
}

static __inline struct pv_entry *
pa_to_pv(vm_offset_t pa)
{
#if 0	/* XXX */
	int bank, pg;

	bank = vm_physseg_find(atop(pa), &pg);
	if (bank == -1)
		return NULL;
	return &vm_physmem[bank].pmseg.pvent[pg];
#endif
	return (NULL);
}

static __inline char *
pa_to_attr(vm_offset_t pa)
{
#if 0	/* XXX */
	int bank, pg;

	bank = vm_physseg_find(atop(pa), &pg);
	if (bank == -1)
		return NULL;
	return &vm_physmem[bank].pmseg.attrs[pg];
#endif
	return (NULL);
}

/*
 * Try to insert page table entry *pt into the ptable at idx.
 *
 * Note: *pt mustn't have PTE_VALID set.
 * This is done here as required by Book III, 4.12.
 */
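/*
 * The image is copied with PTE_VALID clear, a sync orders the stores,
 * and only then is PTE_VALID set, so the hardware table walker never
 * sees a half-written entry.  Entries placed via the secondary hash
 * are tagged with PTE_HID.
 */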
static int
pte_insert(int idx, pte_t *pt)
{
	pte_t *ptp;
	int i;

	/*
	 * First try primary hash.
	 */
	for (ptp = ptable + idx * 8, i = 8; --i >= 0; ptp++) {
		if (!(ptp->pte_hi & PTE_VALID)) {
			*ptp = *pt;
			ptp->pte_hi &= ~PTE_HID;
			__asm __volatile ("sync");
			ptp->pte_hi |= PTE_VALID;
			return 1;
		}
	}

	/*
	 * Then try secondary hash.
	 */

	idx ^= ptab_mask;

	for (ptp = ptable + idx * 8, i = 8; --i >= 0; ptp++) {
		if (!(ptp->pte_hi & PTE_VALID)) {
			*ptp = *pt;
			ptp->pte_hi |= PTE_HID;
			__asm __volatile ("sync");
			ptp->pte_hi |= PTE_VALID;
			return 1;
		}
	}

	return 0;
}

/*
 * Spill handler.
 *
 * Tries to spill a page table entry from the overflow area.
 * Note that this routine runs in real mode on a separate stack,
 * with interrupts disabled.
 */
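/*
 * If both PTEGs are full, a victim slot in the primary group is
 * chosen using the low bits of the timebase as a cheap random number
 * and the displaced PTE is parked back on the overflow list.
 */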
int
pte_spill(vm_offset_t addr)
{
	int idx, i;
	sr_t sr;
	struct pte_ovfl *po;
	pte_t ps;
	pte_t *pt;

	__asm ("mfsrin %0,%1" : "=r"(sr) : "r"(addr));
	idx = pteidx(sr, addr);
	for (po = potable[idx].lh_first; po; po = po->po_list.le_next) {
		if (ptematch(&po->po_pte, sr, addr, 0)) {
			/*
			 * Now found an entry to be spilled into the real
			 * ptable.
			 */
			if (pte_insert(idx, &po->po_pte)) {
				LIST_REMOVE(po, po_list);
				pofree(po, 0);
				return 1;
			}
			/*
			 * Have to substitute some entry.  Use the primary
			 * hash for this.
			 *
			 * Use low bits of timebase as random generator
			 */
			__asm ("mftb %0" : "=r"(i));
			pt = ptable + idx * 8 + (i & 7);
			pt->pte_hi &= ~PTE_VALID;
			ps = *pt;
			__asm __volatile ("sync");
			tlbie(addr);
			tlbsync();
			*pt = po->po_pte;
			__asm __volatile ("sync");
			pt->pte_hi |= PTE_VALID;
			po->po_pte = ps;
			if (ps.pte_hi & PTE_HID) {
				/*
				 * We took an entry that was on the alternate
				 * hash chain, so move it to its original
				 * chain.
				 */
				po->po_pte.pte_hi &= ~PTE_HID;
				LIST_REMOVE(po, po_list);
				LIST_INSERT_HEAD(potable + (idx ^ ptab_mask),
				    po, po_list);
			}
			return 1;
		}
	}

	return 0;
}

/*
 * This is called during powerpc_init, before the system is really initialized.
 */
void
pmap_setavailmem(u_int kernelstart, u_int kernelend)
{
	struct mem_region *mp, *mp1;
	int cnt, i;
	u_int s, e, sz;

	/*
	 * Get memory.
	 */
	mem_regions(&mem, &avail);
	for (mp = mem; mp->size; mp++)
		Maxmem += btoc(mp->size);

	/*
	 * Count the number of available entries.
	 */
	for (cnt = 0, mp = avail; mp->size; mp++) {
		cnt++;
	}

	/*
	 * Page align all regions.
	 * Non-page aligned memory isn't very interesting to us.
	 * Also, sort the entries for ascending addresses.
	 */
	kernelstart &= ~PAGE_MASK;
	kernelend = (kernelend + PAGE_MASK) & ~PAGE_MASK;
	for (mp = avail; mp->size; mp++) {
		s = mp->start;
		e = mp->start + mp->size;
		/*
		 * Check whether this region holds all of the kernel.
		 */
		if (s < kernelstart && e > kernelend) {
			avail[cnt].start = kernelend;
			avail[cnt++].size = e - kernelend;
			e = kernelstart;
		}
		/*
		 * Look whether this region starts within the kernel.
		 */
		if (s >= kernelstart && s < kernelend) {
			if (e <= kernelend)
				goto empty;
			s = kernelend;
		}
		/*
		 * Now look whether this region ends within the kernel.
		 */
		if (e > kernelstart && e <= kernelend) {
			if (s >= kernelstart)
				goto empty;
			e = kernelstart;
		}
		/*
		 * Now page align the start and size of the region.
		 */
		s = round_page(s);
		e = trunc_page(e);
		if (e < s) {
			e = s;
		}
		sz = e - s;
		/*
		 * Check whether some memory is left here.
		 */
		if (sz == 0) {
		empty:
			bcopy(mp + 1, mp,
			    (cnt - (mp - avail)) * sizeof *mp);
			cnt--;
			mp--;
			continue;
		}

		/*
		 * Do an insertion sort.
		 */
		npgs += btoc(sz);

		for (mp1 = avail; mp1 < mp; mp1++) {
			if (s < mp1->start) {
				break;
			}
		}

		if (mp1 < mp) {
			bcopy(mp1, mp1 + 1, (char *)mp - (char *)mp1);
			mp1->start = s;
			mp1->size = sz;
		} else {
			mp->start = s;
			mp->size = sz;
		}
	}

#ifdef HTABENTS
	ptab_cnt = HTABENTS;
#else
	ptab_cnt = (Maxmem + 1) / 2;

	/* The minimum is 1024 PTEGs. */
	if (ptab_cnt < 1024) {
		ptab_cnt = 1024;
	}

	/* Round up to power of 2. */
	__asm ("cntlzw %0,%1" : "=r"(i) : "r"(ptab_cnt - 1));
	ptab_cnt = 1 << (32 - i);
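	/*
	 * Example: Maxmem == 0x18000 pages (384MB of RAM) gives
	 * ptab_cnt == 0xc000 before rounding; cntlzw(0xbfff) == 16,
	 * so ptab_cnt becomes 1 << 16 == 65536 PTEGs, i.e. a 4MB HTAB.
	 */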
#endif

	/*
	 * Find suitably aligned memory for HTAB.
	 */
	for (mp = avail; mp->size; mp++) {
		s = roundup(mp->start, HTABSIZE) - mp->start;

		if (mp->size < s + HTABSIZE) {
			continue;
		}

		ptable = (pte_t *)(mp->start + s);

		if (mp->size == s + HTABSIZE) {
			if (s)
				mp->size = s;
			else {
				bcopy(mp + 1, mp,
				    (cnt - (mp - avail)) * sizeof *mp);
				mp = avail;
			}
			break;
		}

		if (s != 0) {
			bcopy(mp, mp + 1,
			    (cnt - (mp - avail)) * sizeof *mp);
			mp++->size = s;
			cnt++;
		}

		mp->start += s + HTABSIZE;
		mp->size -= s + HTABSIZE;
		break;
	}

	if (!mp->size) {
		panic("not enough memory?");
	}

	npgs -= btoc(HTABSIZE);
	bzero((void *)ptable, HTABSIZE);
	ptab_mask = ptab_cnt - 1;

	/*
	 * We cannot do pmap_steal_memory here,
	 * since we don't run with translation enabled yet.
	 */
	s = sizeof(struct pte_ovtab) * ptab_cnt;
	sz = round_page(s);

	for (mp = avail; mp->size; mp++) {
		if (mp->size >= sz) {
			break;
		}
	}

	if (!mp->size) {
		panic("not enough memory?");
	}

	npgs -= btoc(sz);
	potable = (struct pte_ovtab *)mp->start;
	mp->size -= sz;
	mp->start += sz;

	if (mp->size <= 0) {
		bcopy(mp + 1, mp, (cnt - (mp - avail)) * sizeof *mp);
	}

	for (i = 0; i < ptab_cnt; i++) {
		LIST_INIT(potable + i);
	}

#ifndef MSGBUFADDR
	/*
	 * allow for msgbuf
	 */
	sz = round_page(MSGBUFSIZE);
	mp = NULL;

	for (mp1 = avail; mp1->size; mp1++) {
		if (mp1->size >= sz) {
			mp = mp1;
		}
	}

	if (mp == NULL) {
		panic("not enough memory?");
	}

	npgs -= btoc(sz);
	msgbuf_paddr = mp->start + mp->size - sz;
	mp->size -= sz;

	if (mp->size <= 0) {
		bcopy(mp + 1, mp, (cnt - (mp - avail)) * sizeof *mp);
	}
#endif

	nextavail = avail->start;
	avail_start = avail->start;
	for (mp = avail, i = 0; mp->size; mp++) {
		avail_end = mp->start + mp->size;
		phys_avail[i++] = mp->start;
		phys_avail[i++] = mp->start + mp->size;
	}
}

void
pmap_bootstrap()
{
	int i;
	u_int32_t batl, batu;

	/*
	 * Initialize kernel pmap and hardware.
	 */
	kernel_pmap = &kernel_pmap_store;

	batu = BATU(0x80000000, BAT_BL_256M, BAT_Vs);
	batl = BATL(0x80000000, BAT_M, BAT_PP_RW);
	__asm ("mtdbatu 1,%0; mtdbatl 1,%1" :: "r" (batu), "r" (batl));

#if NPMAPS >= KERNEL_SEGMENT / 16
	usedsr[KERNEL_SEGMENT / 16 / (sizeof usedsr[0] * 8)]
	    |= 1 << ((KERNEL_SEGMENT / 16) % (sizeof usedsr[0] * 8));
#endif

#if 0 /* XXX */
	for (i = 0; i < 16; i++) {
		kernel_pmap->pm_sr[i] = EMPTY_SEGMENT;
		__asm __volatile ("mtsrin %0,%1"
		    :: "r"(EMPTY_SEGMENT), "r"(i << ADDR_SR_SHFT));
	}
#endif

	for (i = 0; i < 16; i++) {
		int j;

		__asm __volatile ("mfsrin %0,%1"
		    : "=r" (j)
		    : "r" (i << ADDR_SR_SHFT));

		kernel_pmap->pm_sr[i] = j;
	}

	kernel_pmap->pm_sr[KERNEL_SR] = KERNEL_SEGMENT;
	__asm __volatile ("mtsr %0,%1"
	    :: "n"(KERNEL_SR), "r"(KERNEL_SEGMENT));
	__asm __volatile ("sync; mtsdr1 %0; isync"
	    :: "r"((u_int)ptable | (ptab_mask >> 10)));

	tlbia();

	virtual_avail = VM_MIN_KERNEL_ADDRESS;
	virtual_end = VM_MAX_KERNEL_ADDRESS;
}

/*
 * Initialize anything else for pmap handling.
 * Called during vm_init().
 */
void
pmap_init(vm_offset_t phys_start, vm_offset_t phys_end)
{
	int initial_pvs;

	/*
	 * init the pv free list
	 */
	initial_pvs = vm_page_array_size;
	if (initial_pvs < MINPV) {
		initial_pvs = MINPV;
	}
	pvzone = &pvzone_store;
	pvinit = (struct pv_entry *) kmem_alloc(kernel_map,
	    initial_pvs * sizeof(struct pv_entry));
	zbootinit(pvzone, "PV ENTRY", sizeof(struct pv_entry), pvinit,
	    vm_page_array_size);

	pmap_initialized = TRUE;
}

/*
 * Initialize a preallocated and zeroed pmap structure.
 */
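/*
 * Segment registers are handed out in blocks of 16 consecutive VSIDs,
 * one per 256MB segment, tracked by the usedsr bitmap; pm_sr[0] then
 * uniquely identifies the block so pmap_release() can return it.
 */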
void
pmap_pinit(struct pmap *pm)
{
	int i, j;

	/*
	 * Allocate some segment registers for this pmap.
	 */
	pm->pm_refs = 1;
	for (i = 0; i < sizeof usedsr / sizeof usedsr[0]; i++) {
		if (usedsr[i] != 0xffffffff) {
			j = ffs(~usedsr[i]) - 1;
			usedsr[i] |= 1 << j;
			pm->pm_sr[0] = (i * sizeof usedsr[0] * 8 + j) * 16;
			for (i = 1; i < 16; i++) {
				pm->pm_sr[i] = pm->pm_sr[i - 1] + 1;
			}
			return;
		}
	}
	panic("out of segments");
}

void
pmap_pinit2(pmap_t pmap)
{

	/*
	 * Nothing to be done.
	 */
	return;
}

/*
 * Add a reference to the given pmap.
 */
void
pmap_reference(struct pmap *pm)
{

	pm->pm_refs++;
}

/*
 * Retire the given pmap from service.
 * Should only be called if the map contains no valid mappings.
 */
void
pmap_destroy(struct pmap *pm)
{

	if (--pm->pm_refs == 0) {
		pmap_release(pm);
		free((caddr_t)pm, M_VMPGDATA);
	}
}

/*
 * Release any resources held by the given physical map.
 * Called when a pmap initialized by pmap_pinit is being released.
 */
void
pmap_release(struct pmap *pm)
{
	int i, j;

	if (!pm->pm_sr[0]) {
		panic("pmap_release");
	}
	i = pm->pm_sr[0] / 16;
	j = i % (sizeof usedsr[0] * 8);
	i /= sizeof usedsr[0] * 8;
	usedsr[i] &= ~(1 << j);
}

/*
 * Copy the range specified by src_addr/len
 * from the source map to the range dst_addr/len
 * in the destination map.
 *
 * This routine is only advisory and need not do anything.
 */
void
pmap_copy(struct pmap *dst_pmap, struct pmap *src_pmap, vm_offset_t dst_addr,
    vm_size_t len, vm_offset_t src_addr)
{

	return;
}

/*
 * Garbage collects the physical map system for
 * pages which are no longer used.
 * Success need not be guaranteed -- that is, there
 * may well be pages which are not referenced, but
 * others may be collected.
 * Called by the pageout daemon when pages are scarce.
 */
void
pmap_collect(void)
{

	return;
}

/*
 * Fill the given physical page with zeroes.
 */
void
pmap_zero_page(vm_offset_t pa)
{
#if 0
	bzero((caddr_t)pa, PAGE_SIZE);
#else
	int i;

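	/*
	 * dcbz establishes each cache line as zero directly in the
	 * data cache, without first fetching the line from memory, so
	 * this loop is cheaper than a bzero() of the page.
	 */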
	for (i = PAGE_SIZE/CACHELINESIZE; i > 0; i--) {
		__asm __volatile ("dcbz 0,%0" :: "r"(pa));
		pa += CACHELINESIZE;
	}
#endif
}

void
pmap_zero_page_area(vm_offset_t pa, int off, int size)
{

	bzero((caddr_t)pa + off, size);
}

/*
 * Copy the given physical source page to its destination.
 */
void
pmap_copy_page(vm_offset_t src, vm_offset_t dst)
{

	bcopy((caddr_t)src, (caddr_t)dst, PAGE_SIZE);
}

static struct pv_entry *
pmap_alloc_pv()
{
	pv_entry_count++;

	if (pv_entry_high_water &&
	    (pv_entry_count > pv_entry_high_water) &&
	    (pmap_pagedaemon_waken == 0)) {
		pmap_pagedaemon_waken = 1;
		wakeup(&vm_pages_needed);
	}

	return zalloc(pvzone);
}

static void
pmap_free_pv(struct pv_entry *pv)
{

	pv_entry_count--;
	zfree(pvzone, pv);
}

/*
 * We really hope that we don't need overflow entries
 * before the VM system is initialized!
 *
 * XXX: Should really be switched over to the zone allocator.
 */
static struct pte_ovfl *
poalloc()
{
	struct po_page *pop;
	struct pte_ovfl *po;
	vm_page_t mem;
	int i;

	if (!pmap_initialized) {
		panic("poalloc");
	}

	if (po_nfree == 0) {
		/*
		 * Since we cannot use maps for potable allocation,
		 * we have to steal some memory from the VM system. XXX
		 */
		mem = vm_page_alloc(NULL, 0, VM_ALLOC_SYSTEM);
		po_pcnt++;
		pop = (struct po_page *)VM_PAGE_TO_PHYS(mem);
		pop->pop_pgi.pgi_page = mem;
		LIST_INIT(&pop->pop_pgi.pgi_freelist);
		for (i = NPOPPG - 1, po = pop->pop_po + 1; --i >= 0; po++) {
			LIST_INSERT_HEAD(&pop->pop_pgi.pgi_freelist, po,
			    po_list);
		}
		po_nfree += pop->pop_pgi.pgi_nfree = NPOPPG - 1;
		LIST_INSERT_HEAD(&po_page_freelist, pop, pop_pgi.pgi_list);
		po = pop->pop_po;
	} else {
		po_nfree--;
		pop = po_page_freelist.lh_first;
		if (--pop->pop_pgi.pgi_nfree <= 0) {
			LIST_REMOVE(pop, pop_pgi.pgi_list);
		}
		po = pop->pop_pgi.pgi_freelist.lh_first;
		LIST_REMOVE(po, po_list);
	}

	return po;
}

static void
pofree(struct pte_ovfl *po, int freepage)
{
	struct po_page *pop;

	pop = (struct po_page *)trunc_page((vm_offset_t)po);
	switch (++pop->pop_pgi.pgi_nfree) {
	case NPOPPG:
		if (!freepage) {
			break;
		}
		po_nfree -= NPOPPG - 1;
		po_pcnt--;
		LIST_REMOVE(pop, pop_pgi.pgi_list);
		vm_page_free(pop->pop_pgi.pgi_page);
		return;
	case 1:
		LIST_INSERT_HEAD(&po_page_freelist, pop, pop_pgi.pgi_list);
	default:
		break;
	}
	LIST_INSERT_HEAD(&pop->pop_pgi.pgi_freelist, po, po_list);
	po_nfree++;
}

/*
 * This returns whether this is the first mapping of a page.
 */
static int
pmap_enter_pv(int pteidx, vm_offset_t va, vm_offset_t pa)
{
	struct pv_entry *pv, *npv;
	int s, first;

	if (!pmap_initialized) {
		return 0;
	}

	s = splimp();

	pv = pa_to_pv(pa);
	first = pv->pv_idx;
	if (pv->pv_idx == -1) {
		/*
		 * No entries yet, use header as the first entry.
		 */
		pv->pv_va = va;
		pv->pv_idx = pteidx;
		pv->pv_next = NULL;
	} else {
		/*
		 * There is at least one other VA mapping this page.
		 * Place this entry after the header.
		 */
		npv = pmap_alloc_pv();
		npv->pv_va = va;
		npv->pv_idx = pteidx;
		npv->pv_next = pv->pv_next;
		pv->pv_next = npv;
	}
	splx(s);
	return first;
}

static void
pmap_remove_pv(int pteidx, vm_offset_t va, vm_offset_t pa, struct pte *pte)
{
	struct pv_entry *pv, *npv;
	char *attr;

	/*
	 * First transfer reference/change bits to cache.
	 */
	attr = pa_to_attr(pa);
	if (attr == NULL) {
		return;
	}
	*attr |= (pte->pte_lo & (PTE_REF | PTE_CHG)) >> ATTRSHFT;

	/*
	 * Remove from the PV table.
	 */
	pv = pa_to_pv(pa);

	/*
	 * If it is the first entry on the list, it is actually
	 * in the header and we must copy the following entry up
	 * to the header.  Otherwise we must search the list for
	 * the entry.  In either case we free the now unused entry.
	 */
	if (pteidx == pv->pv_idx && va == pv->pv_va) {
		npv = pv->pv_next;
		if (npv) {
			*pv = *npv;
			pmap_free_pv(npv);
		} else {
			pv->pv_idx = -1;
		}
	} else {
		for (; (npv = pv->pv_next); pv = npv) {
			if (pteidx == npv->pv_idx && va == npv->pv_va) {
				break;
			}
		}
		if (npv) {
			pv->pv_next = npv->pv_next;
			pmap_free_pv(npv);
		}
#ifdef DIAGNOSTIC
		else {
			panic("pmap_remove_pv: not on list\n");
		}
#endif
	}
}

/*
 * Insert physical page at pa into the given pmap at virtual address va.
 */
void
pmap_enter(pmap_t pm, vm_offset_t va, vm_page_t pg, vm_prot_t prot,
    boolean_t wired)
{
	sr_t sr;
	int idx, s;
	pte_t pte;
	struct pte_ovfl *po;
	struct mem_region *mp;
	vm_offset_t pa;

	pa = VM_PAGE_TO_PHYS(pg) & ~PAGE_MASK;

	/*
	 * Have to remove any existing mapping first.
	 */
	pmap_remove(pm, va, va + PAGE_SIZE);

	/*
	 * Compute the HTAB index.
	 */
	idx = pteidx(sr = ptesr(pm->pm_sr, va), va);
	/*
	 * Construct the PTE.
	 *
	 * Note: Don't set the valid bit for correct operation of tlb update.
	 */
	pte.pte_hi = ((sr & SR_VSID) << PTE_VSID_SHFT)
	    | ((va & ADDR_PIDX) >> ADDR_API_SHFT);
	pte.pte_lo = (pa & PTE_RPGN) | PTE_M | PTE_I | PTE_G;

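	/*
	 * Physical addresses inside a known RAM region are mapped
	 * cacheable (the PTE_I and PTE_G bits are cleared below);
	 * everything else keeps PTE_I | PTE_G and is treated as
	 * device memory.
	 */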
	for (mp = mem; mp->size; mp++) {
		if (pa >= mp->start && pa < mp->start + mp->size) {
			pte.pte_lo &= ~(PTE_I | PTE_G);
			break;
		}
	}
	if (prot & VM_PROT_WRITE) {
		pte.pte_lo |= PTE_RW;
	} else {
		pte.pte_lo |= PTE_RO;
	}

	/*
	 * Now record mapping for later back-translation.
	 */
	if (pmap_initialized && (pg->flags & PG_FICTITIOUS) == 0) {
		if (pmap_enter_pv(idx, va, pa)) {
			/*
			 * Flush the real memory from the cache.
			 */
			__syncicache((void *)pa, PAGE_SIZE);
		}
	}

	s = splimp();
	pm->pm_stats.resident_count++;
	/*
	 * Try to insert directly into HTAB.
	 */
	if (pte_insert(idx, &pte)) {
		splx(s);
		return;
	}

	/*
	 * Have to allocate overflow entry.
	 *
	 * Note that we must use real addresses for these.
	 */
	po = poalloc();
	po->po_pte = pte;
	LIST_INSERT_HEAD(potable + idx, po, po_list);
	splx(s);
}

void
pmap_kenter(vm_offset_t va, vm_offset_t pa)
{
	struct vm_page pg;

	pg.phys_addr = pa;
	pmap_enter(kernel_pmap, va, &pg, VM_PROT_READ|VM_PROT_WRITE, TRUE);
}

void
pmap_kremove(vm_offset_t va)
{
	pmap_remove(kernel_pmap, va, va + PAGE_SIZE);
}

/*
 * Remove the given range of mapping entries.
 */
void
pmap_remove(struct pmap *pm, vm_offset_t va, vm_offset_t endva)
{
	int idx, i, s;
	sr_t sr;
	pte_t *ptp;
	struct pte_ovfl *po, *npo;

	s = splimp();
	while (va < endva) {
		idx = pteidx(sr = ptesr(pm->pm_sr, va), va);
		for (ptp = ptable + idx * 8, i = 8; --i >= 0; ptp++) {
			if (ptematch(ptp, sr, va, PTE_VALID)) {
				pmap_remove_pv(idx, va, ptp->pte_lo, ptp);
				ptp->pte_hi &= ~PTE_VALID;
				__asm __volatile ("sync");
				tlbie(va);
				tlbsync();
				pm->pm_stats.resident_count--;
			}
		}
		for (ptp = ptable + (idx ^ ptab_mask) * 8, i = 8; --i >= 0;
		    ptp++) {
			if (ptematch(ptp, sr, va, PTE_VALID | PTE_HID)) {
				pmap_remove_pv(idx, va, ptp->pte_lo, ptp);
				ptp->pte_hi &= ~PTE_VALID;
				__asm __volatile ("sync");
				tlbie(va);
				tlbsync();
				pm->pm_stats.resident_count--;
			}
		}
		for (po = potable[idx].lh_first; po; po = npo) {
			npo = po->po_list.le_next;
			if (ptematch(&po->po_pte, sr, va, 0)) {
				pmap_remove_pv(idx, va, po->po_pte.pte_lo,
				    &po->po_pte);
				LIST_REMOVE(po, po_list);
				pofree(po, 1);
				pm->pm_stats.resident_count--;
			}
		}
		va += PAGE_SIZE;
	}
	splx(s);
}

static pte_t *
pte_find(struct pmap *pm, vm_offset_t va)
{
	int idx, i;
	sr_t sr;
	pte_t *ptp;
	struct pte_ovfl *po;

	idx = pteidx(sr = ptesr(pm->pm_sr, va), va);
	for (ptp = ptable + idx * 8, i = 8; --i >= 0; ptp++) {
		if (ptematch(ptp, sr, va, PTE_VALID)) {
			return ptp;
		}
	}
	for (ptp = ptable + (idx ^ ptab_mask) * 8, i = 8; --i >= 0; ptp++) {
		if (ptematch(ptp, sr, va, PTE_VALID | PTE_HID)) {
			return ptp;
		}
	}
	for (po = potable[idx].lh_first; po; po = po->po_list.le_next) {
		if (ptematch(&po->po_pte, sr, va, 0)) {
			return &po->po_pte;
		}
	}
	return 0;
}

/*
 * Get the physical page address for the given pmap/virtual address.
 */
vm_offset_t
pmap_extract(pmap_t pm, vm_offset_t va)
{
	pte_t *ptp;
	int s;

	s = splimp();

	if (!(ptp = pte_find(pm, va))) {
		splx(s);
		return (0);
	}
	splx(s);
	return ((ptp->pte_lo & PTE_RPGN) | (va & ADDR_POFF));
}

/*
 * Lower the protection on the specified range of this pmap.
 *
 * There are only two cases: either the protection is going to 0,
 * or it is going to read-only.
 */
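/*
 * Downgrading to read-only is done in place: each PTE is invalidated,
 * the TLB entry is flushed, the PP bits are rewritten to PTE_RO, and
 * the entry is revalidated.
 */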
void
pmap_protect(struct pmap *pm, vm_offset_t sva, vm_offset_t eva, vm_prot_t prot)
{
	pte_t *ptp;
	int valid, s;

	if (prot & VM_PROT_READ) {
		s = splimp();
		while (sva < eva) {
			ptp = pte_find(pm, sva);
			if (ptp) {
				valid = ptp->pte_hi & PTE_VALID;
				ptp->pte_hi &= ~PTE_VALID;
				__asm __volatile ("sync");
				tlbie(sva);
				tlbsync();
				ptp->pte_lo &= ~PTE_PP;
				ptp->pte_lo |= PTE_RO;
				__asm __volatile ("sync");
				ptp->pte_hi |= valid;
			}
			sva += PAGE_SIZE;
		}
		splx(s);
		return;
	}
	pmap_remove(pm, sva, eva);
}

boolean_t
ptemodify(vm_page_t pg, u_int mask, u_int val)
{
	vm_offset_t pa;
	struct pv_entry *pv;
	pte_t *ptp;
	struct pte_ovfl *po;
	int i, s;
	char *attr;
	int rv;

	pa = VM_PAGE_TO_PHYS(pg);

	/*
	 * First modify bits in cache.
	 */
	attr = pa_to_attr(pa);
	if (attr == NULL) {
		return FALSE;
	}

	*attr &= ~mask >> ATTRSHFT;
	*attr |= val >> ATTRSHFT;

	pv = pa_to_pv(pa);
	if (pv->pv_idx < 0) {
		return FALSE;
	}

	rv = FALSE;
	s = splimp();
	for (; pv; pv = pv->pv_next) {
		for (ptp = ptable + pv->pv_idx * 8, i = 8; --i >= 0; ptp++) {
			if ((ptp->pte_hi & PTE_VALID)
			    && (ptp->pte_lo & PTE_RPGN) == pa) {
				ptp->pte_hi &= ~PTE_VALID;
				__asm __volatile ("sync");
				tlbie(pv->pv_va);
				tlbsync();
				rv |= ptp->pte_lo & mask;
				ptp->pte_lo &= ~mask;
				ptp->pte_lo |= val;
				__asm __volatile ("sync");
				ptp->pte_hi |= PTE_VALID;
			}
		}
		for (ptp = ptable + (pv->pv_idx ^ ptab_mask) * 8, i = 8;
		    --i >= 0; ptp++) {
			if ((ptp->pte_hi & PTE_VALID)
			    && (ptp->pte_lo & PTE_RPGN) == pa) {
				ptp->pte_hi &= ~PTE_VALID;
				__asm __volatile ("sync");
				tlbie(pv->pv_va);
				tlbsync();
				rv |= ptp->pte_lo & mask;
				ptp->pte_lo &= ~mask;
				ptp->pte_lo |= val;
				__asm __volatile ("sync");
				ptp->pte_hi |= PTE_VALID;
			}
		}
		for (po = potable[pv->pv_idx].lh_first; po;
		    po = po->po_list.le_next) {
			if ((po->po_pte.pte_lo & PTE_RPGN) == pa) {
				rv |= po->po_pte.pte_lo & mask;
				po->po_pte.pte_lo &= ~mask;
				po->po_pte.pte_lo |= val;
			}
		}
	}
	splx(s);
	return rv != 0;
}

int
ptebits(vm_page_t pg, int bit)
{
	struct pv_entry *pv;
	pte_t *ptp;
	struct pte_ovfl *po;
	int i, s, bits;
	char *attr;
	vm_offset_t pa;

	bits = 0;
	pa = VM_PAGE_TO_PHYS(pg);

	/*
	 * First try the cache.
	 */
	attr = pa_to_attr(pa);
	if (attr == NULL) {
		return 0;
	}
	bits |= (*attr << ATTRSHFT) & bit;
	if (bits == bit) {
		return bits;
	}

	pv = pa_to_pv(pa);
	if (pv->pv_idx < 0) {
		return 0;
	}

	s = splimp();
	for (; pv; pv = pv->pv_next) {
		for (ptp = ptable + pv->pv_idx * 8, i = 8; --i >= 0; ptp++) {
			if ((ptp->pte_hi & PTE_VALID)
			    && (ptp->pte_lo & PTE_RPGN) == pa) {
				bits |= ptp->pte_lo & bit;
				if (bits == bit) {
					splx(s);
					return bits;
				}
			}
		}
		for (ptp = ptable + (pv->pv_idx ^ ptab_mask) * 8, i = 8;
		    --i >= 0; ptp++) {
			if ((ptp->pte_hi & PTE_VALID)
			    && (ptp->pte_lo & PTE_RPGN) == pa) {
				bits |= ptp->pte_lo & bit;
				if (bits == bit) {
					splx(s);
					return bits;
				}
			}
		}
		for (po = potable[pv->pv_idx].lh_first; po;
		    po = po->po_list.le_next) {
			if ((po->po_pte.pte_lo & PTE_RPGN) == pa) {
				bits |= po->po_pte.pte_lo & bit;
				if (bits == bit) {
					splx(s);
					return bits;
				}
			}
		}
	}
	splx(s);
	return bits;
}

/*
 * Lower the protection on the specified physical page.
 *
 * There are only two cases: either the protection is going to 0,
 * or it is going to read-only.
 */
void
pmap_page_protect(vm_page_t m, vm_prot_t prot)
{
	vm_offset_t pa;
	vm_offset_t va;
	pte_t *ptp;
	struct pte_ovfl *po, *npo;
	int i, s, idx;
	struct pv_entry *pv;

	pa = VM_PAGE_TO_PHYS(m);

	pa &= ~ADDR_POFF;
	if (prot & VM_PROT_READ) {
		ptemodify(m, PTE_PP, PTE_RO);
		return;
	}

	pv = pa_to_pv(pa);
	if (pv == NULL) {
		return;
	}

	s = splimp();
	while (pv->pv_idx >= 0) {
		idx = pv->pv_idx;
		va = pv->pv_va;
		for (ptp = ptable + idx * 8, i = 8; --i >= 0; ptp++) {
			if ((ptp->pte_hi & PTE_VALID)
			    && (ptp->pte_lo & PTE_RPGN) == pa) {
				pmap_remove_pv(idx, va, pa, ptp);
				ptp->pte_hi &= ~PTE_VALID;
				__asm __volatile ("sync");
				tlbie(va);
				tlbsync();
				goto next;
			}
		}
		for (ptp = ptable + (idx ^ ptab_mask) * 8, i = 8; --i >= 0;
		    ptp++) {
			if ((ptp->pte_hi & PTE_VALID)
			    && (ptp->pte_lo & PTE_RPGN) == pa) {
				pmap_remove_pv(idx, va, pa, ptp);
				ptp->pte_hi &= ~PTE_VALID;
				__asm __volatile ("sync");
				tlbie(va);
				tlbsync();
				goto next;
			}
		}
		for (po = potable[idx].lh_first; po; po = npo) {
			npo = po->po_list.le_next;
			if ((po->po_pte.pte_lo & PTE_RPGN) == pa) {
				pmap_remove_pv(idx, va, pa, &po->po_pte);
				LIST_REMOVE(po, po_list);
				pofree(po, 1);
				goto next;
			}
		}
next:
		;
	}
	splx(s);
}

/*
 * Activate the address space for the specified process.  If the process
 * is the current process, load the new MMU context.
 */
void
pmap_activate(struct thread *td)
{
	struct pcb *pcb;
	pmap_t pmap;
	pmap_t rpm;
	int psl, i, ksr, seg;

	pcb = td->td_pcb;
	pmap = vmspace_pmap(td->td_proc->p_vmspace);

	/*
	 * XXX Normally performed in cpu_fork().
	 */
	if (pcb->pcb_pm != pmap) {
		pcb->pcb_pm = pmap;
		(vm_offset_t) pcb->pcb_pmreal = pmap_extract(kernel_pmap,
		    (vm_offset_t)pcb->pcb_pm);
	}

	if (td == curthread) {
		/* Disable interrupts while switching. */
		psl = mfmsr();
		mtmsr(psl & ~PSL_EE);

#if 0 /* XXX */
		/* Store pointer to new current pmap. */
		curpm = pcb->pcb_pmreal;
#endif

		/* Save kernel SR. */
		__asm __volatile("mfsr %0,14" : "=r"(ksr) :);

		/*
		 * Set new segment registers.  We use the pmap's real
		 * address to avoid accessibility problems.
		 */
		rpm = pcb->pcb_pmreal;
		for (i = 0; i < 16; i++) {
			seg = rpm->pm_sr[i];
			__asm __volatile("mtsrin %0,%1"
			    :: "r"(seg), "r"(i << ADDR_SR_SHFT));
		}

		/* Restore kernel SR. */
		__asm __volatile("mtsr 14,%0" :: "r"(ksr));

		/* Interrupts are OK again. */
		mtmsr(psl);
	}
}

/*
 * Add a list of wired pages to the kva.  This routine is only used
 * for temporary kernel mappings that do not need to have page
 * modification or references recorded.  Note that old mappings are
 * simply written over.  The page *must* be wired.
 */
void
pmap_qenter(vm_offset_t va, vm_page_t *m, int count)
{
	int i;

	for (i = 0; i < count; i++) {
		vm_offset_t tva = va + i * PAGE_SIZE;
		pmap_kenter(tva, VM_PAGE_TO_PHYS(m[i]));
	}
}

/*
 * This routine jerks page mappings from the kernel -- it is meant
 * only for temporary mappings.
 */
void
pmap_qremove(vm_offset_t va, int count)
{
	vm_offset_t end_va;

	end_va = va + count * PAGE_SIZE;

	while (va < end_va) {
		unsigned *pte;

		pte = (unsigned *)vtopte(va);
		*pte = 0;
		tlbie(va);
		va += PAGE_SIZE;
	}
}

/*
 * pmap_ts_referenced:
 *
 *	Return the count of reference bits for a page, clearing all of them.
 */
int
pmap_ts_referenced(vm_page_t m)
{

	/* XXX: coming soon... */
	return (0);
}

/*
 * This routine returns true if a physical page resides
 * in the given pmap.
 */
boolean_t
pmap_page_exists(pmap_t pmap, vm_page_t m)
{
#if 0 /* XXX: This must go! */
	register pv_entry_t pv;
	int s;

	if (!pmap_initialized || (m->flags & PG_FICTITIOUS))
		return FALSE;

	s = splvm();

	/*
	 * Not found, check current mappings returning immediately if found.
	 */
	for (pv = pv_table; pv; pv = pv->pv_next) {
		if (pv->pv_pmap == pmap) {
			splx(s);
			return TRUE;
		}
	}
	splx(s);
#endif
	return (FALSE);
}

/*
 * Used to map a range of physical addresses into kernel
 * virtual address space.
 *
 * For now, VM is already on, we only need to map the
 * specified memory.
 */
vm_offset_t
pmap_map(vm_offset_t *virt, vm_offset_t start, vm_offset_t end, int prot)
{
	vm_offset_t sva, va;

	sva = *virt;
	va = sva;

	while (start < end) {
		pmap_kenter(va, start);
		va += PAGE_SIZE;
		start += PAGE_SIZE;
	}

	*virt = va;
	return (sva);
}

vm_offset_t
pmap_addr_hint(vm_object_t obj, vm_offset_t addr, vm_size_t size)
{

	return (addr);
}

int
pmap_mincore(pmap_t pmap, vm_offset_t addr)
{

	/* XXX: coming soon... */
	return (0);
}

void
pmap_object_init_pt(pmap_t pmap, vm_offset_t addr, vm_object_t object,
    vm_pindex_t pindex, vm_size_t size, int limit)
{

	/* XXX: coming soon... */
	return;
}

void
pmap_growkernel(vm_offset_t addr)
{

	/* XXX: coming soon... */
	return;
}

/*
 * Initialize the address space (zone) for the pv_entries.  Set a
 * high water mark so that the system can recover from excessive
 * numbers of pv entries.
 */
void
pmap_init2()
{
	int shpgperproc = PMAP_SHPGPERPROC;

	TUNABLE_INT_FETCH("vm.pmap.shpgperproc", &shpgperproc);
	pv_entry_max = shpgperproc * maxproc + vm_page_array_size;
	pv_entry_high_water = 9 * (pv_entry_max / 10);
	zinitna(pvzone, &pvzone_obj, NULL, 0, pv_entry_max, ZONE_INTERRUPT, 1);
}

void
pmap_swapin_proc(struct proc *p)
{

	/* XXX: coming soon... */
	return;
}

void
pmap_swapout_proc(struct proc *p)
{

	/* XXX: coming soon... */
	return;
}

/*
 * Create the kernel stack (including pcb for i386) for a new thread.
 * This routine directly affects the fork perf for a process and
 * create performance for a thread.
 */
void
pmap_new_thread(td)
	struct thread *td;
{
	/* XXX: coming soon... */
	return;
}

/*
 * Dispose the kernel stack for a thread that has exited.
 * This routine directly impacts the exit perf of a process and thread.
 */
void
pmap_dispose_thread(td)
	struct thread *td;
{
	/* XXX: coming soon... */
	return;
}

/*
 * Allow the Kernel stack for a thread to be prejudicially paged out.
 */
void
pmap_swapout_thread(td)
	struct thread *td;
{
	int i;
	vm_object_t ksobj;
	vm_offset_t ks;
	vm_page_t m;

	ksobj = td->td_kstack_obj;
	ks = td->td_kstack;
	for (i = 0; i < KSTACK_PAGES; i++) {
		m = vm_page_lookup(ksobj, i);
		if (m == NULL)
			panic("pmap_swapout_thread: kstack already missing?");
		vm_page_dirty(m);
		vm_page_unwire(m, 0);
		pmap_kremove(ks + i * PAGE_SIZE);
	}
}

/*
 * Bring the kernel stack for a specified thread back in.
 */
void
pmap_swapin_thread(td)
	struct thread *td;
{
	int i, rv;
	vm_object_t ksobj;
	vm_offset_t ks;
	vm_page_t m;

	ksobj = td->td_kstack_obj;
	ks = td->td_kstack;
	for (i = 0; i < KSTACK_PAGES; i++) {
		m = vm_page_grab(ksobj, i, VM_ALLOC_NORMAL | VM_ALLOC_RETRY);
		pmap_kenter(ks + i * PAGE_SIZE, VM_PAGE_TO_PHYS(m));
		if (m->valid != VM_PAGE_BITS_ALL) {
			rv = vm_pager_get_pages(ksobj, &m, 1, 0);
			if (rv != VM_PAGER_OK)
				panic("pmap_swapin_thread: cannot get kstack for proc: %d\n",
				    td->td_proc->p_pid);
			m = vm_page_lookup(ksobj, i);
			m->valid = VM_PAGE_BITS_ALL;
		}
		vm_page_wire(m);
		vm_page_wakeup(m);
		vm_page_flag_set(m, PG_MAPPED | PG_WRITEABLE);
	}
}

void
pmap_pageable(pmap_t pmap, vm_offset_t sva, vm_offset_t eva, boolean_t pageable)
{

	return;
}

void
pmap_change_wiring(pmap_t pmap, vm_offset_t va, boolean_t wired)
{

	/* XXX: coming soon... */
	return;
}

void
pmap_prefault(pmap_t pmap, vm_offset_t addra, vm_map_entry_t entry)
{

	/* XXX: coming soon... */
	return;
}

void
pmap_remove_pages(pmap_t pmap, vm_offset_t sva, vm_offset_t eva)
{

	/* XXX: coming soon... */
	return;
}

void
pmap_pinit0(pmap_t pmap)
{

	/* XXX: coming soon... */
	return;
}

void
pmap_dispose_proc(struct proc *p)
{

	/* XXX: coming soon... */
	return;
}

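/*
 * Grab physically contiguous memory from the head of phys_avail[]
 * before the VM system is up; banks too small for the request are
 * dropped from the front of the list.
 */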
vm_offset_t
pmap_steal_memory(vm_size_t size)
{
	vm_size_t bank_size;
	vm_offset_t pa;

	size = round_page(size);

	bank_size = phys_avail[1] - phys_avail[0];
	while (size > bank_size) {
		int i;
		for (i = 0; phys_avail[i + 2]; i += 2) {
			phys_avail[i] = phys_avail[i + 2];
			phys_avail[i + 1] = phys_avail[i + 3];
		}
		phys_avail[i] = 0;
		phys_avail[i + 1] = 0;
		if (!phys_avail[0])
			panic("pmap_steal_memory: out of memory");
		bank_size = phys_avail[1] - phys_avail[0];
	}

	pa = phys_avail[0];
	phys_avail[0] += size;

	bzero((caddr_t) pa, size);
	return pa;
}

/*
 * Create the UAREA_PAGES for a new process.
 * This routine directly affects the fork perf for a process.
 */
void
pmap_new_proc(struct proc *p)
{
	int i;
	vm_object_t upobj;
	vm_offset_t up;
	vm_page_t m;
	pte_t pte;
	sr_t sr;
	int idx;
	vm_offset_t va;

	/*
	 * allocate object for the upages
	 */
	upobj = p->p_upages_obj;
	if (upobj == NULL) {
		upobj = vm_object_allocate(OBJT_DEFAULT, UAREA_PAGES);
		p->p_upages_obj = upobj;
	}

	/* get a kernel virtual address for the UAREA_PAGES for this proc */
	up = (vm_offset_t)p->p_uarea;
	if (up == 0) {
		up = kmem_alloc_nofault(kernel_map, UAREA_PAGES * PAGE_SIZE);
		if (up == 0)
			panic("pmap_new_proc: upage allocation failed");
		p->p_uarea = (struct user *)up;
	}

	for (i = 0; i < UAREA_PAGES; i++) {
		/*
		 * Get a kernel stack page
		 */
		m = vm_page_grab(upobj, i, VM_ALLOC_NORMAL | VM_ALLOC_RETRY);

		/*
		 * Wire the page
		 */
		m->wire_count++;
		cnt.v_wire_count++;

		/*
		 * Enter the page into the kernel address space.
		 */
		va = up + i * PAGE_SIZE;
		idx = pteidx(sr = ptesr(kernel_pmap->pm_sr, va), va);

		pte.pte_hi = ((sr & SR_VSID) << PTE_VSID_SHFT) |
		    ((va & ADDR_PIDX) >> ADDR_API_SHFT);
		pte.pte_lo = (VM_PAGE_TO_PHYS(m) & PTE_RPGN) | PTE_M | PTE_I |
		    PTE_G | PTE_RW;

		if (!pte_insert(idx, &pte)) {
			struct pte_ovfl *po;

			po = poalloc();
			po->po_pte = pte;
			LIST_INSERT_HEAD(potable + idx, po, po_list);
		}

		tlbie(va);

		vm_page_wakeup(m);
		vm_page_flag_clear(m, PG_ZERO);
		vm_page_flag_set(m, PG_MAPPED | PG_WRITEABLE);
		m->valid = VM_PAGE_BITS_ALL;
	}
}

void *
pmap_mapdev(vm_offset_t pa, vm_size_t len)
{
	vm_offset_t faddr;
	vm_offset_t taddr, va;
	int off;

	faddr = trunc_page(pa);
	off = pa - faddr;
	len = round_page(off + len);

	GIANT_REQUIRED;

	va = taddr = kmem_alloc_pageable(kernel_map, len);

	if (va == 0)
		return NULL;

	for (; len > 0; len -= PAGE_SIZE) {
		pmap_kenter(taddr, faddr);
		faddr += PAGE_SIZE;
		taddr += PAGE_SIZE;
	}

	return (void *)(va + off);
}