1/*
|
2 * Copyright (c) 2001 The NetBSD Foundation, Inc. 3 * All rights reserved. 4 * 5 * This code is derived from software contributed to The NetBSD Foundation 6 * by Matt Thomas <matt@3am-software.com> of Allegro Networks, Inc. 7 * 8 * Redistribution and use in source and binary forms, with or without 9 * modification, are permitted provided that the following conditions 10 * are met: 11 * 1. Redistributions of source code must retain the above copyright 12 * notice, this list of conditions and the following disclaimer. 13 * 2. Redistributions in binary form must reproduce the above copyright 14 * notice, this list of conditions and the following disclaimer in the 15 * documentation and/or other materials provided with the distribution. 16 * 3. All advertising materials mentioning features or use of this software 17 * must display the following acknowledgement: 18 * This product includes software developed by the NetBSD 19 * Foundation, Inc. and its contributors. 20 * 4. Neither the name of The NetBSD Foundation nor the names of its 21 * contributors may be used to endorse or promote products derived 22 * from this software without specific prior written permission. 23 * 24 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS 25 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED 26 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR 27 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS 28 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR 29 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF 30 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS 31 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN 32 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) 33 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE 34 * POSSIBILITY OF SUCH DAMAGE. 35 */ 36/* |
37 * Copyright (C) 1995, 1996 Wolfgang Solfrank. 38 * Copyright (C) 1995, 1996 TooLs GmbH. 39 * All rights reserved. 40 * 41 * Redistribution and use in source and binary forms, with or without 42 * modification, are permitted provided that the following conditions 43 * are met: 44 * 1. Redistributions of source code must retain the above copyright 45 * notice, this list of conditions and the following disclaimer. 46 * 2. Redistributions in binary form must reproduce the above copyright 47 * notice, this list of conditions and the following disclaimer in the 48 * documentation and/or other materials provided with the distribution. 49 * 3. All advertising materials mentioning features or use of this software 50 * must display the following acknowledgement: 51 * This product includes software developed by TooLs GmbH. 52 * 4. The name of TooLs GmbH may not be used to endorse or promote products 53 * derived from this software without specific prior written permission. 54 * 55 * THIS SOFTWARE IS PROVIDED BY TOOLS GMBH ``AS IS'' AND ANY EXPRESS OR 56 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES 57 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 58 * IN NO EVENT SHALL TOOLS GMBH BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, 59 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, 60 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; 61 * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, 62 * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR 63 * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF 64 * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 65 * 66 * $NetBSD: pmap.c,v 1.28 2000/03/26 20:42:36 kleink Exp $ 67 */ 68/* 69 * Copyright (C) 2001 Benno Rice. 70 * All rights reserved. 71 * 72 * Redistribution and use in source and binary forms, with or without 73 * modification, are permitted provided that the following conditions 74 * are met: 75 * 1. Redistributions of source code must retain the above copyright 76 * notice, this list of conditions and the following disclaimer. 77 * 2. Redistributions in binary form must reproduce the above copyright 78 * notice, this list of conditions and the following disclaimer in the 79 * documentation and/or other materials provided with the distribution. 80 * 81 * THIS SOFTWARE IS PROVIDED BY Benno Rice ``AS IS'' AND ANY EXPRESS OR 82 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES 83 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 84 * IN NO EVENT SHALL TOOLS GMBH BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, 85 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, 86 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; 87 * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, 88 * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR 89 * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF 90 * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 91 */ 92 93#ifndef lint 94static const char rcsid[] =
|
60 "$FreeBSD: head/sys/powerpc/aim/mmu_oea.c 85201 2001-10-19 22:45:46Z mp $";
|
95 "$FreeBSD: head/sys/powerpc/aim/mmu_oea.c 90643 2002-02-14 01:39:11Z benno $"; |
96#endif /* not lint */ 97
|
98/* 99 * Manages physical address maps. 100 * 101 * In addition to hardware address maps, this module is called upon to 102 * provide software-use-only maps which may or may not be stored in the 103 * same form as hardware maps. These pseudo-maps are used to store 104 * intermediate results from copy operations to and from address spaces. 105 * 106 * Since the information managed by this module is also stored by the 107 * logical address mapping module, this module may throw away valid virtual 108 * to physical mappings at almost any time. However, invalidations of 109 * mappings must be done as requested. 110 * 111 * In order to cope with hardware architectures which make virtual to 112 * physical map invalidates expensive, this module may delay invalidate 113 * reduced protection operations until such time as they are actually 114 * necessary. This module is given full information as to which processors 115 * are currently using which maps, and to when physical maps must be made 116 * correct. 117 */ 118 |
119#include <sys/param.h>
|
64#include <sys/systm.h>
|
120#include <sys/kernel.h>
|
66#include <sys/proc.h>
67#include <sys/malloc.h>
68#include <sys/msgbuf.h>
69#include <sys/vmmeter.h>
70#include <sys/mman.h>
71#include <sys/queue.h>
|
121#include <sys/ktr.h> |
122#include <sys/lock.h>
|
123#include <sys/msgbuf.h> |
124#include <sys/mutex.h>
|
125#include <sys/proc.h> 126#include <sys/sysctl.h> 127#include <sys/systm.h> 128#include <sys/vmmeter.h> |
129
|
75#include <vm/vm.h>
|
130#include <dev/ofw/openfirm.h> 131 132#include <vm/vm.h> |
133#include <vm/vm_param.h> 134#include <vm/vm_kern.h> 135#include <vm/vm_page.h> 136#include <vm/vm_map.h> 137#include <vm/vm_object.h> 138#include <vm/vm_extern.h> 139#include <vm/vm_pageout.h> 140#include <vm/vm_pager.h> 141#include <vm/vm_zone.h> 142
|
86#include <sys/user.h>
87
|
143#include <machine/bat.h>
|
89#include <machine/pcb.h>
90#include <machine/powerpc.h>
|
144#include <machine/frame.h> 145#include <machine/md_var.h> 146#include <machine/psl.h> |
147#include <machine/pte.h>
|
148#include <machine/sr.h> |
149
|
93pte_t *ptable;
94int ptab_cnt;
95u_int ptab_mask;
96#define HTABSIZE (ptab_cnt * 64)
|
150#define PMAP_DEBUG |
151
|
98#define MINPV 2048
|
152#define TODO panic("%s: not implemented", __func__); |
153
|
100struct pte_ovfl {
101 LIST_ENTRY(pte_ovfl) po_list; /* Linked list of overflow entries */
102 struct pte po_pte; /* PTE for this mapping */
|
154#define PMAP_LOCK(pm) 155#define PMAP_UNLOCK(pm) 156 157#define TLBIE(va) __asm __volatile("tlbie %0" :: "r"(va)) 158#define TLBSYNC() __asm __volatile("tlbsync"); 159#define SYNC() __asm __volatile("sync"); 160#define EIEIO() __asm __volatile("eieio"); 161 162#define VSID_MAKE(sr, hash) ((sr) | (((hash) & 0xfffff) << 4)) 163#define VSID_TO_SR(vsid) ((vsid) & 0xf) 164#define VSID_TO_HASH(vsid) (((vsid) >> 4) & 0xfffff) 165 166#define PVO_PTEGIDX_MASK 0x0007 /* which PTEG slot */ 167#define PVO_PTEGIDX_VALID 0x0008 /* slot is valid */ 168#define PVO_WIRED 0x0010 /* PVO entry is wired */ 169#define PVO_MANAGED 0x0020 /* PVO entry is managed */ 170#define PVO_EXECUTABLE 0x0040 /* PVO entry is executable */ 171#define PVO_VADDR(pvo) ((pvo)->pvo_vaddr & ~ADDR_POFF) 172#define PVO_ISEXECUTABLE(pvo) ((pvo)->pvo_vaddr & PVO_EXECUTABLE) 173#define PVO_PTEGIDX_GET(pvo) ((pvo)->pvo_vaddr & PVO_PTEGIDX_MASK) 174#define PVO_PTEGIDX_ISSET(pvo) ((pvo)->pvo_vaddr & PVO_PTEGIDX_VALID) 175#define PVO_PTEGIDX_CLR(pvo) \ 176 ((void)((pvo)->pvo_vaddr &= ~(PVO_PTEGIDX_VALID|PVO_PTEGIDX_MASK))) 177#define PVO_PTEGIDX_SET(pvo, i) \ 178 ((void)((pvo)->pvo_vaddr |= (i)|PVO_PTEGIDX_VALID)) 179 180#define PMAP_PVO_CHECK(pvo) 181 182struct mem_region { 183 vm_offset_t mr_start; 184 vm_offset_t mr_size; |
185}; 186
|
105LIST_HEAD(pte_ovtab, pte_ovfl) *potable; /* Overflow entries for ptable */
|
187struct ofw_map { 188 vm_offset_t om_va; 189 vm_size_t om_len; 190 vm_offset_t om_pa; 191 u_int om_mode; 192}; |
193
|
107static struct pmap kernel_pmap_store;
108pmap_t kernel_pmap;
|
194int pmap_bootstrapped = 0; |
195
|
110static int npgs;
111static u_int nextavail;
|
196/* 197 * Virtual and physical address of message buffer. 198 */ 199struct msgbuf *msgbufp; 200vm_offset_t msgbuf_phys; |
201
|
113#ifndef MSGBUFADDR
114extern vm_offset_t msgbuf_paddr;
115#endif
|
202/* 203 * Physical addresses of first and last available physical page. 204 */ 205vm_offset_t avail_start; 206vm_offset_t avail_end; |
207
|
117static struct mem_region *mem, *avail;
|
208/* 209 * Map of physical memory regions. 210 */ 211vm_offset_t phys_avail[128]; 212u_int phys_avail_count; 213static struct mem_region regions[128]; 214static struct ofw_map translations[128]; 215static int translations_size; |
216
|
119vm_offset_t avail_start;
120vm_offset_t avail_end;
121vm_offset_t virtual_avail;
122vm_offset_t virtual_end;
|
217/* 218 * First and last available kernel virtual addresses. 219 */ 220vm_offset_t virtual_avail; 221vm_offset_t virtual_end; 222vm_offset_t kernel_vm_end; |
223
|
124vm_offset_t kernel_vm_end;
|
224/* 225 * Kernel pmap. 226 */ 227struct pmap kernel_pmap_store; 228extern struct pmap ofw_pmap; |
229
|
126static int pmap_pagedaemon_waken = 0;
|
230/* 231 * PTEG data. 232 */ 233static struct pteg *pmap_pteg_table; 234u_int pmap_pteg_count; 235u_int pmap_pteg_mask; |
236
|
128extern unsigned int Maxmem;
|
237/* 238 * PVO data. 239 */ 240struct pvo_head *pmap_pvo_table; /* pvo entries by pteg index */ 241struct pvo_head pmap_pvo_kunmanaged = 242 LIST_HEAD_INITIALIZER(pmap_pvo_kunmanaged); /* list of unmanaged pages */ 243struct pvo_head pmap_pvo_unmanaged = 244 LIST_HEAD_INITIALIZER(pmap_pvo_unmanaged); /* list of unmanaged pages */ |
245
|
130#define ATTRSHFT 4
|
246vm_zone_t pmap_upvo_zone; /* zone for pvo entries for unmanaged pages */ 247vm_zone_t pmap_mpvo_zone; /* zone for pvo entries for managed pages */ 248struct vm_zone pmap_upvo_zone_store; 249struct vm_zone pmap_mpvo_zone_store; 250struct vm_object pmap_upvo_zone_obj; 251struct vm_object pmap_mpvo_zone_obj; |
252
|
132struct pv_entry *pv_table;
|
253#define PMAP_PVO_SIZE 1024 254struct pvo_entry pmap_upvo_pool[PMAP_PVO_SIZE]; |
255
|
134static vm_zone_t pvzone;
135static struct vm_zone pvzone_store;
136static struct vm_object pvzone_obj;
137static int pv_entry_count=0, pv_entry_max=0, pv_entry_high_water=0;
138static struct pv_entry *pvinit;
|
256#define VSID_NBPW (sizeof(u_int32_t) * 8) 257static u_int pmap_vsid_bitmap[NPMAPS / VSID_NBPW]; |
258
|
140#if !defined(PMAP_SHPGPERPROC)
141#define PMAP_SHPGPERPROC 200
142#endif
|
259static boolean_t pmap_initialized = FALSE; |
260
|
144struct pv_page;
145struct pv_page_info {
146 LIST_ENTRY(pv_page) pgi_list;
147 struct pv_entry *pgi_freelist;
148 int pgi_nfree;
149};
150#define NPVPPG ((PAGE_SIZE - sizeof(struct pv_page_info)) / sizeof(struct pv_entry))
151struct pv_page {
152 struct pv_page_info pvp_pgi;
153 struct pv_entry pvp_pv[NPVPPG];
154};
155LIST_HEAD(pv_page_list, pv_page) pv_page_freelist;
156int pv_nfree;
157int pv_pcnt;
158static struct pv_entry *pmap_alloc_pv(void);
159static void pmap_free_pv(struct pv_entry *);
|
261/* 262 * Statistics. 263 */ 264u_int pmap_pte_valid = 0; 265u_int pmap_pte_overflow = 0; 266u_int pmap_pte_replacements = 0; 267u_int pmap_pvo_entries = 0; 268u_int pmap_pvo_enter_calls = 0; 269u_int pmap_pvo_remove_calls = 0; 270u_int pmap_pte_spills = 0; 271SYSCTL_INT(_machdep, OID_AUTO, pmap_pte_valid, CTLFLAG_RD, &pmap_pte_valid, 272 0, ""); 273SYSCTL_INT(_machdep, OID_AUTO, pmap_pte_overflow, CTLFLAG_RD, 274 &pmap_pte_overflow, 0, ""); 275SYSCTL_INT(_machdep, OID_AUTO, pmap_pte_replacements, CTLFLAG_RD, 276 &pmap_pte_replacements, 0, ""); 277SYSCTL_INT(_machdep, OID_AUTO, pmap_pvo_entries, CTLFLAG_RD, &pmap_pvo_entries, 278 0, ""); 279SYSCTL_INT(_machdep, OID_AUTO, pmap_pvo_enter_calls, CTLFLAG_RD, 280 &pmap_pvo_enter_calls, 0, ""); 281SYSCTL_INT(_machdep, OID_AUTO, pmap_pvo_remove_calls, CTLFLAG_RD, 282 &pmap_pvo_remove_calls, 0, ""); 283SYSCTL_INT(_machdep, OID_AUTO, pmap_pte_spills, CTLFLAG_RD, 284 &pmap_pte_spills, 0, ""); |
285
|
161struct po_page;
162struct po_page_info {
163 LIST_ENTRY(po_page) pgi_list;
164 vm_page_t pgi_page;
165 LIST_HEAD(po_freelist, pte_ovfl) pgi_freelist;
166 int pgi_nfree;
167};
168#define NPOPPG ((PAGE_SIZE - sizeof(struct po_page_info)) / sizeof(struct pte_ovfl))
169struct po_page {
170 struct po_page_info pop_pgi;
171 struct pte_ovfl pop_po[NPOPPG];
172};
173LIST_HEAD(po_page_list, po_page) po_page_freelist;
174int po_nfree;
175int po_pcnt;
176static struct pte_ovfl *poalloc(void);
177static void pofree(struct pte_ovfl *, int);
|
286struct pvo_entry *pmap_pvo_zeropage; |
287
|
179static u_int usedsr[NPMAPS / sizeof(u_int) / 8];
|
288vm_offset_t pmap_rkva_start = VM_MIN_KERNEL_ADDRESS; 289u_int pmap_rkva_count = 4; |
290
|
181static int pmap_initialized;
|
291/* 292 * Allocate physical memory for use in pmap_bootstrap. 293 */ 294static vm_offset_t pmap_bootstrap_alloc(vm_size_t, u_int); |
295
|
183int pte_spill(vm_offset_t);
|
296/* 297 * PTE calls. 298 */ 299static int pmap_pte_insert(u_int, struct pte *); |
300 301/*
|
186 * These small routines may have to be replaced,
187 * if/when we support processors other that the 604.
|
302 * PVO calls. |
303 */
|
189static __inline void
190tlbie(vm_offset_t ea)
191{
|
304static int pmap_pvo_enter(pmap_t, vm_zone_t, struct pvo_head *, 305 vm_offset_t, vm_offset_t, u_int, int); 306static void pmap_pvo_remove(struct pvo_entry *, int); 307static struct pvo_entry *pmap_pvo_find_va(pmap_t, vm_offset_t, int *); 308static struct pte *pmap_pvo_to_pte(const struct pvo_entry *, int); |
309
|
193 __asm __volatile ("tlbie %0" :: "r"(ea));
|
310/* 311 * Utility routines. 312 */ 313static struct pvo_entry *pmap_rkva_alloc(void); 314static void pmap_pa_map(struct pvo_entry *, vm_offset_t, 315 struct pte *, int *); 316static void pmap_pa_unmap(struct pvo_entry *, struct pte *, int *); 317static void pmap_syncicache(vm_offset_t, vm_size_t); 318static boolean_t pmap_query_bit(vm_page_t, int); 319static boolean_t pmap_clear_bit(vm_page_t, int); 320static void tlbia(void); 321 322static __inline int 323va_to_sr(u_int *sr, vm_offset_t va) 324{ 325 return (sr[(uintptr_t)va >> ADDR_SR_SHFT]); |
326} 327
|
196static __inline void
197tlbsync(void)
|
328static __inline u_int 329va_to_pteg(u_int sr, vm_offset_t addr) |
330{
|
331 u_int hash; |
332
|
200 __asm __volatile ("sync; tlbsync; sync");
|
333 hash = (sr & SR_VSID_MASK) ^ (((u_int)addr & ADDR_PIDX) >> 334 ADDR_PIDX_SHFT); 335 return (hash & pmap_pteg_mask); |
336} 337
|
203static __inline void
204tlbia(void)
|
338static __inline struct pvo_head * 339pa_to_pvoh(vm_offset_t pa) |
340{
|
206 vm_offset_t i;
207
208 __asm __volatile ("sync");
209 for (i = 0; i < (vm_offset_t)0x00040000; i += 0x00001000) {
210 tlbie(i);
211 }
212 tlbsync();
|
341 struct vm_page *pg; 342 343 pg = PHYS_TO_VM_PAGE(pa); 344 345 if (pg == NULL) 346 return (&pmap_pvo_unmanaged); 347 348 return (&pg->md.mdpg_pvoh); |
349} 350
|
215static __inline int
216ptesr(sr_t *sr, vm_offset_t addr)
|
351static __inline struct pvo_head * 352vm_page_to_pvoh(vm_page_t m) |
353{ 354
|
219 return sr[(u_int)addr >> ADDR_SR_SHFT];
|
355 return (&m->md.mdpg_pvoh); |
356} 357
|
222static __inline int
223pteidx(sr_t sr, vm_offset_t addr)
|
358static __inline void 359pmap_attr_clear(vm_page_t m, int ptebit) |
360{
|
225 int hash;
226
227 hash = (sr & SR_VSID) ^ (((u_int)addr & ADDR_PIDX) >> ADDR_PIDX_SHFT);
228 return hash & ptab_mask;
|
361 362 m->md.mdpg_attrs &= ~ptebit; |
363} 364 365static __inline int
|
232ptematch(pte_t *ptp, sr_t sr, vm_offset_t va, int which)
|
366pmap_attr_fetch(vm_page_t m) |
367{ 368
|
235 return ptp->pte_hi == (((sr & SR_VSID) << PTE_VSID_SHFT) |
236 (((u_int)va >> ADDR_API_SHFT) & PTE_API) | which);
|
369 return (m->md.mdpg_attrs); |
370} 371
|
239static __inline struct pv_entry *
240pa_to_pv(vm_offset_t pa)
|
372static __inline void 373pmap_attr_save(vm_page_t m, int ptebit) |
374{
|
242#if 0 /* XXX */
243 int bank, pg;
|
375
|
245 bank = vm_physseg_find(atop(pa), &pg);
246 if (bank == -1)
247 return NULL;
248 return &vm_physmem[bank].pmseg.pvent[pg];
249#endif
250 return (NULL);
|
376 m->md.mdpg_attrs |= ptebit; |
377} 378
|
253static __inline char *
254pa_to_attr(vm_offset_t pa)
|
379static __inline int 380pmap_pte_compare(const struct pte *pt, const struct pte *pvo_pt) |
381{
|
256#if 0 /* XXX */
257 int bank, pg;
|
382 if (pt->pte_hi == pvo_pt->pte_hi) 383 return (1); |
384
|
259 bank = vm_physseg_find(atop(pa), &pg);
260 if (bank == -1)
261 return NULL;
262 return &vm_physmem[bank].pmseg.attrs[pg];
263#endif
264 return (NULL);
|
385 return (0); |
386} 387
|
267/*
268 * Try to insert page table entry *pt into the ptable at idx.
269 *
270 * Note: *pt mustn't have PTE_VALID set.
271 * This is done here as required by Book III, 4.12.
272 */
273static int
274pte_insert(int idx, pte_t *pt)
|
388static __inline int 389pmap_pte_match(struct pte *pt, u_int sr, vm_offset_t va, int which) |
390{
|
276 pte_t *ptp;
277 int i;
|
391 return (pt->pte_hi & ~PTE_VALID) == 392 (((sr & SR_VSID_MASK) << PTE_VSID_SHFT) | 393 ((va >> ADDR_API_SHFT) & PTE_API) | which); 394} |
395
|
396static __inline void 397pmap_pte_create(struct pte *pt, u_int sr, vm_offset_t va, u_int pte_lo) 398{ |
399 /*
|
280 * First try primary hash.
|
400 * Construct a PTE. Default to IMB initially. Valid bit only gets 401 * set when the real pte is set in memory. 402 * 403 * Note: Don't set the valid bit for correct operation of tlb update. |
404 */
|
282 for (ptp = ptable + idx * 8, i = 8; --i >= 0; ptp++) {
283 if (!(ptp->pte_hi & PTE_VALID)) {
284 *ptp = *pt;
285 ptp->pte_hi &= ~PTE_HID;
286 __asm __volatile ("sync");
287 ptp->pte_hi |= PTE_VALID;
288 return 1;
289 }
290 }
|
405 pt->pte_hi = ((sr & SR_VSID_MASK) << PTE_VSID_SHFT) | 406 (((va & ADDR_PIDX) >> ADDR_API_SHFT) & PTE_API); 407 pt->pte_lo = pte_lo; 408} |
409
|
292 /*
293 * Then try secondary hash.
294 */
|
410static __inline void 411pmap_pte_synch(struct pte *pt, struct pte *pvo_pt) 412{ |
413
|
296 idx ^= ptab_mask;
|
414 pvo_pt->pte_lo |= pt->pte_lo & (PTE_REF | PTE_CHG); 415} |
416
|
298 for (ptp = ptable + idx * 8, i = 8; --i >= 0; ptp++) {
299 if (!(ptp->pte_hi & PTE_VALID)) {
300 *ptp = *pt;
301 ptp->pte_hi |= PTE_HID;
302 __asm __volatile ("sync");
303 ptp->pte_hi |= PTE_VALID;
304 return 1;
305 }
306 }
|
417static __inline void 418pmap_pte_clear(struct pte *pt, vm_offset_t va, int ptebit) 419{ |
420
|
308 return 0;
|
421 /* 422 * As shown in Section 7.6.3.2.3 423 */ 424 pt->pte_lo &= ~ptebit; 425 TLBIE(va); 426 EIEIO(); 427 TLBSYNC(); 428 SYNC(); |
429} 430
|
311/*
312 * Spill handler.
313 *
314 * Tries to spill a page table entry from the overflow area.
315 * Note that this routine runs in real mode on a separate stack,
316 * with interrupts disabled.
317 */
318int
319pte_spill(vm_offset_t addr)
|
431static __inline void 432pmap_pte_set(struct pte *pt, struct pte *pvo_pt) |
433{
|
321 int idx, i;
322 sr_t sr;
323 struct pte_ovfl *po;
324 pte_t ps;
325 pte_t *pt;
|
434
|
327 __asm ("mfsrin %0,%1" : "=r"(sr) : "r"(addr));
328 idx = pteidx(sr, addr);
329 for (po = potable[idx].lh_first; po; po = po->po_list.le_next) {
330 if (ptematch(&po->po_pte, sr, addr, 0)) {
331 /*
332 * Now found an entry to be spilled into the real
333 * ptable.
334 */
335 if (pte_insert(idx, &po->po_pte)) {
336 LIST_REMOVE(po, po_list);
337 pofree(po, 0);
338 return 1;
339 }
340 /*
341 * Have to substitute some entry. Use the primary
342 * hash for this.
343 *
344 * Use low bits of timebase as random generator
345 */
346 __asm ("mftb %0" : "=r"(i));
347 pt = ptable + idx * 8 + (i & 7);
348 pt->pte_hi &= ~PTE_VALID;
349 ps = *pt;
350 __asm __volatile ("sync");
351 tlbie(addr);
352 tlbsync();
353 *pt = po->po_pte;
354 __asm __volatile ("sync");
355 pt->pte_hi |= PTE_VALID;
356 po->po_pte = ps;
357 if (ps.pte_hi & PTE_HID) {
358 /*
359 * We took an entry that was on the alternate
360 * hash chain, so move it to it's original
361 * chain.
362 */
363 po->po_pte.pte_hi &= ~PTE_HID;
364 LIST_REMOVE(po, po_list);
365 LIST_INSERT_HEAD(potable + (idx ^ ptab_mask),
366 po, po_list);
367 }
368 return 1;
369 }
370 }
|
435 pvo_pt->pte_hi |= PTE_VALID; |
436
|
372 return 0;
|
437 /* 438 * Update the PTE as defined in section 7.6.3.1. 439 * Note that the REF/CHG bits are from pvo_pt and thus should havce 440 * been saved so this routine can restore them (if desired). 441 */ 442 pt->pte_lo = pvo_pt->pte_lo; 443 EIEIO(); 444 pt->pte_hi = pvo_pt->pte_hi; 445 SYNC(); 446 pmap_pte_valid++; |
447} 448
|
375/*
376 * This is called during powerpc_init, before the system is really initialized.
377 */
378void
379pmap_setavailmem(u_int kernelstart, u_int kernelend)
|
449static __inline void 450pmap_pte_unset(struct pte *pt, struct pte *pvo_pt, vm_offset_t va) |
451{
|
381 struct mem_region *mp, *mp1;
382 int cnt, i;
383 u_int s, e, sz;
|
452
|
453 pvo_pt->pte_hi &= ~PTE_VALID; 454 |
455 /*
|
386 * Get memory.
|
456 * Force the reg & chg bits back into the PTEs. |
457 */
|
388 mem_regions(&mem, &avail);
389 for (mp = mem; mp->size; mp++)
390 Maxmem += btoc(mp->size);
|
458 SYNC(); |
459 460 /*
|
393 * Count the number of available entries.
|
461 * Invalidate the pte. |
462 */
|
395 for (cnt = 0, mp = avail; mp->size; mp++) {
396 cnt++;
397 }
|
463 pt->pte_hi &= ~PTE_VALID; |
464
|
465 SYNC(); 466 TLBIE(va); 467 EIEIO(); 468 TLBSYNC(); 469 SYNC(); 470 |
471 /*
|
400 * Page align all regions.
401 * Non-page aligned memory isn't very interesting to us.
402 * Also, sort the entries for ascending addresses.
|
472 * Save the reg & chg bits. |
473 */
|
404 kernelstart &= ~PAGE_MASK;
405 kernelend = (kernelend + PAGE_MASK) & ~PAGE_MASK;
406 for (mp = avail; mp->size; mp++) {
407 s = mp->start;
408 e = mp->start + mp->size;
409 /*
410 * Check whether this region holds all of the kernel.
411 */
412 if (s < kernelstart && e > kernelend) {
413 avail[cnt].start = kernelend;
414 avail[cnt++].size = e - kernelend;
415 e = kernelstart;
416 }
417 /*
418 * Look whether this regions starts within the kernel.
419 */
420 if (s >= kernelstart && s < kernelend) {
421 if (e <= kernelend)
422 goto empty;
423 s = kernelend;
424 }
425 /*
426 * Now look whether this region ends within the kernel.
427 */
428 if (e > kernelstart && e <= kernelend) {
429 if (s >= kernelstart)
430 goto empty;
431 e = kernelstart;
432 }
433 /*
434 * Now page align the start and size of the region.
435 */
436 s = round_page(s);
437 e = trunc_page(e);
438 if (e < s) {
439 e = s;
440 }
441 sz = e - s;
442 /*
443 * Check whether some memory is left here.
444 */
445 if (sz == 0) {
446 empty:
447 bcopy(mp + 1, mp,
448 (cnt - (mp - avail)) * sizeof *mp);
449 cnt--;
450 mp--;
451 continue;
452 }
|
474 pmap_pte_synch(pt, pvo_pt); 475 pmap_pte_valid--; 476} |
477
|
454 /*
455 * Do an insertion sort.
456 */
457 npgs += btoc(sz);
|
478static __inline void 479pmap_pte_change(struct pte *pt, struct pte *pvo_pt, vm_offset_t va) 480{ |
481
|
459 for (mp1 = avail; mp1 < mp; mp1++) {
460 if (s < mp1->start) {
461 break;
462 }
463 }
464
465 if (mp1 < mp) {
466 bcopy(mp1, mp1 + 1, (char *)mp - (char *)mp1);
467 mp1->start = s;
468 mp1->size = sz;
469 } else {
470 mp->start = s;
471 mp->size = sz;
472 }
473 }
474
475#ifdef HTABENTS
476 ptab_cnt = HTABENTS;
477#else
478 ptab_cnt = (Maxmem + 1) / 2;
479
480 /* The minimum is 1024 PTEGs. */
481 if (ptab_cnt < 1024) {
482 ptab_cnt = 1024;
483 }
484
485 /* Round up to power of 2. */
486 __asm ("cntlzw %0,%1" : "=r"(i) : "r"(ptab_cnt - 1));
487 ptab_cnt = 1 << (32 - i);
488#endif
489
|
482 /*
|
491 * Find suitably aligned memory for HTAB.
|
483 * Invalidate the PTE |
484 */
|
493 for (mp = avail; mp->size; mp++) {
494 s = roundup(mp->start, HTABSIZE) - mp->start;
|
485 pmap_pte_unset(pt, pvo_pt, va); 486 pmap_pte_set(pt, pvo_pt); 487} |
488
|
496 if (mp->size < s + HTABSIZE) {
497 continue;
498 }
|
489/* 490 * Quick sort callout for comparing memory regions. 491 */ 492static int mr_cmp(const void *a, const void *b); 493static int om_cmp(const void *a, const void *b); |
494
|
500 ptable = (pte_t *)(mp->start + s);
|
495static int 496mr_cmp(const void *a, const void *b) 497{ 498 const struct mem_region *regiona; 499 const struct mem_region *regionb; |
500
|
502 if (mp->size == s + HTABSIZE) {
503 if (s)
504 mp->size = s;
505 else {
506 bcopy(mp + 1, mp,
507 (cnt - (mp - avail)) * sizeof *mp);
508 mp = avail;
509 }
510 break;
511 }
|
501 regiona = a; 502 regionb = b; 503 if (regiona->mr_start < regionb->mr_start) 504 return (-1); 505 else if (regiona->mr_start > regionb->mr_start) 506 return (1); 507 else 508 return (0); 509} |
510
|
513 if (s != 0) {
514 bcopy(mp, mp + 1,
515 (cnt - (mp - avail)) * sizeof *mp);
516 mp++->size = s;
517 cnt++;
518 }
|
511static int 512om_cmp(const void *a, const void *b) 513{ 514 const struct ofw_map *mapa; 515 const struct ofw_map *mapb; |
516
|
520 mp->start += s + HTABSIZE;
521 mp->size -= s + HTABSIZE;
522 break;
523 }
|
517 mapa = a; 518 mapb = b; 519 if (mapa->om_pa < mapb->om_pa) 520 return (-1); 521 else if (mapa->om_pa > mapb->om_pa) 522 return (1); 523 else 524 return (0); 525} |
526
|
525 if (!mp->size) {
526 panic("not enough memory?");
527 }
|
527void 528pmap_bootstrap(vm_offset_t kernelstart, vm_offset_t kernelend) 529{ 530 ihandle_t pmem, mmui; 531 phandle_t chosen, mmu; 532 int sz; 533 int i, j; 534 vm_size_t size; 535 vm_offset_t pa, va, off; 536 u_int batl, batu; |
537
|
529 npgs -= btoc(HTABSIZE);
530 bzero((void *)ptable, HTABSIZE);
531 ptab_mask = ptab_cnt - 1;
|
538 /* 539 * Use an IBAT and a DBAT to map the bottom segment of memory 540 * where we are. 541 */ 542 batu = BATU(0x00000000, BAT_BL_256M, BAT_Vs); 543 batl = BATL(0x00000000, BAT_M, BAT_PP_RW); 544 __asm ("mtibatu 0,%0; mtibatl 0,%1; mtdbatu 0,%0; mtdbatl 0,%1" 545 :: "r"(batu), "r"(batl)); 546#if 0 547 batu = BATU(0x80000000, BAT_BL_256M, BAT_Vs); 548 batl = BATL(0x80000000, BAT_M, BAT_PP_RW); 549 __asm ("mtibatu 1,%0; mtibatl 1,%1; mtdbatu 1,%0; mtdbatl 1,%1" 550 :: "r"(batu), "r"(batl)); 551#endif |
552 553 /*
|
534 * We cannot do pmap_steal_memory here,
535 * since we don't run with translation enabled yet.
|
554 * Set the start and end of kva. |
555 */
|
537 s = sizeof(struct pte_ovtab) * ptab_cnt;
538 sz = round_page(s);
|
556 virtual_avail = VM_MIN_KERNEL_ADDRESS; 557 virtual_end = VM_MAX_KERNEL_ADDRESS; |
558
|
540 for (mp = avail; mp->size; mp++) {
541 if (mp->size >= sz) {
542 break;
543 }
|
559 if ((pmem = OF_finddevice("/memory")) == -1) 560 panic("pmap_bootstrap: can't locate memory device"); 561 if ((sz = OF_getproplen(pmem, "available")) == -1) 562 panic("pmap_bootstrap: can't get length of available memory"); 563 if (sizeof(phys_avail) < sz) 564 panic("pmap_bootstrap: phys_avail too small"); 565 if (sizeof(regions) < sz) 566 panic("pmap_bootstrap: regions too small"); 567 bzero(regions, sz); 568 if (OF_getprop(pmem, "available", regions, sz) == -1) 569 panic("pmap_bootstrap: can't get available memory"); 570 sz /= sizeof(*regions); 571 CTR0(KTR_PMAP, "pmap_bootstrap: physical memory"); 572 qsort(regions, sz, sizeof(*regions), mr_cmp); 573 phys_avail_count = 0; 574 for (i = 0, j = 0; i < sz; i++, j += 2) { 575 CTR3(KTR_PMAP, "region: %#x - %#x (%#x)", regions[i].mr_start, 576 regions[i].mr_start + regions[i].mr_size, 577 regions[i].mr_size); 578 phys_avail[j] = regions[i].mr_start; 579 phys_avail[j + 1] = regions[i].mr_start + regions[i].mr_size; 580 phys_avail_count++; |
581 } 582
|
546 if (!mp->size) {
547 panic("not enough memory?");
548 }
|
583 /* 584 * Allocate PTEG table. 585 */ 586#ifdef PTEGCOUNT 587 pmap_pteg_count = PTEGCOUNT; 588#else 589 pmap_pteg_count = 0x1000; |
590
|
550 npgs -= btoc(sz);
551 potable = (struct pte_ovtab *)mp->start;
552 mp->size -= sz;
553 mp->start += sz;
|
591 while (pmap_pteg_count < physmem) 592 pmap_pteg_count <<= 1; |
593
|
555 if (mp->size <= 0) {
556 bcopy(mp + 1, mp, (cnt - (mp - avail)) * sizeof *mp);
557 }
|
594 pmap_pteg_count >>= 1; 595#endif /* PTEGCOUNT */ |
596
|
559 for (i = 0; i < ptab_cnt; i++) {
560 LIST_INIT(potable + i);
561 }
|
597 size = pmap_pteg_count * sizeof(struct pteg); 598 CTR2(KTR_PMAP, "pmap_bootstrap: %d PTEGs, %d bytes", pmap_pteg_count, 599 size); 600 pmap_pteg_table = (struct pteg *)pmap_bootstrap_alloc(size, size); 601 CTR1(KTR_PMAP, "pmap_bootstrap: PTEG table at %p", pmap_pteg_table); 602 bzero((void *)pmap_pteg_table, pmap_pteg_count * sizeof(struct pteg)); 603 pmap_pteg_mask = pmap_pteg_count - 1; |
604
|
563#ifndef MSGBUFADDR
|
605 /*
|
565 * allow for msgbuf
|
606 * Allocate PTE overflow lists. |
607 */
|
567 sz = round_page(MSGBUFSIZE);
568 mp = NULL;
|
608 size = sizeof(struct pvo_head) * pmap_pteg_count; 609 pmap_pvo_table = (struct pvo_head *)pmap_bootstrap_alloc(size, 610 PAGE_SIZE); 611 CTR1(KTR_PMAP, "pmap_bootstrap: PVO table at %p", pmap_pvo_table); 612 for (i = 0; i < pmap_pteg_count; i++) 613 LIST_INIT(&pmap_pvo_table[i]); |
614
|
570 for (mp1 = avail; mp1->size; mp1++) {
571 if (mp1->size >= sz) {
572 mp = mp1;
573 }
574 }
|
615 /* 616 * Allocate the message buffer. 617 */ 618 msgbuf_phys = pmap_bootstrap_alloc(MSGBUF_SIZE, 0); |
619
|
576 if (mp == NULL) {
577 panic("not enough memory?");
578 }
|
620 /* 621 * Initialise the unmanaged pvo pool. 622 */ 623 pmap_upvo_zone = &pmap_upvo_zone_store; 624 zbootinit(pmap_upvo_zone, "unmanaged pvo", sizeof (struct pvo_entry), 625 pmap_upvo_pool, PMAP_PVO_SIZE); |
626
|
580 npgs -= btoc(sz);
581 msgbuf_paddr = mp->start + mp->size - sz;
582 mp->size -= sz;
|
627 /* 628 * Make sure kernel vsid is allocated as well as VSID 0. 629 */ 630 pmap_vsid_bitmap[(KERNEL_VSIDBITS & (NPMAPS - 1)) / VSID_NBPW] 631 |= 1 << (KERNEL_VSIDBITS % VSID_NBPW); 632 pmap_vsid_bitmap[0] |= 1; |
633
|
584 if (mp->size <= 0) {
585 bcopy(mp + 1, mp, (cnt - (mp - avail)) * sizeof *mp);
586 }
587#endif
588
589 nextavail = avail->start;
590 avail_start = avail->start;
591 for (mp = avail, i = 0; mp->size; mp++) {
592 avail_end = mp->start + mp->size;
593 phys_avail[i++] = mp->start;
594 phys_avail[i++] = mp->start + mp->size;
595 }
596
597
598}
599
600void
601pmap_bootstrap()
602{
603 int i;
604 u_int32_t batl, batu;
605
|
634 /*
|
607 * Initialize kernel pmap and hardware.
|
635 * Set up the OpenFirmware pmap and add it's mappings. |
636 */
|
609 kernel_pmap = &kernel_pmap_store;
|
637 pmap_pinit(&ofw_pmap); 638 ofw_pmap.pm_sr[KERNEL_SR] = KERNEL_SEGMENT; 639 if ((chosen = OF_finddevice("/chosen")) == -1) 640 panic("pmap_bootstrap: can't find /chosen"); 641 OF_getprop(chosen, "mmu", &mmui, 4); 642 if ((mmu = OF_instance_to_package(mmui)) == -1) 643 panic("pmap_bootstrap: can't get mmu package"); 644 if ((sz = OF_getproplen(mmu, "translations")) == -1) 645 panic("pmap_bootstrap: can't get ofw translation count"); 646 if (sizeof(translations) < sz) 647 panic("pmap_bootstrap: translations too small"); 648 bzero(translations, sz); 649 if (OF_getprop(mmu, "translations", translations, sz) == -1) 650 panic("pmap_bootstrap: can't get ofw translations"); 651 CTR0(KTR_PMAP, "pmap_bootstrap: translations"); 652 qsort(translations, sz, sizeof (*translations), om_cmp); 653 for (i = 0; i < sz; i++) { 654 CTR3(KTR_PMAP, "translation: pa=%#x va=%#x len=%#x", 655 translations[i].om_pa, translations[i].om_va, 656 translations[i].om_len); |
657
|
611 batu = BATU(0x80000000, BAT_BL_256M, BAT_Vs);
612 batl = BATL(0x80000000, BAT_M, BAT_PP_RW);
613 __asm ("mtdbatu 1,%0; mtdbatl 1,%1" :: "r" (batu), "r" (batl));
|
658 /* Drop stuff below something? */ |
659
|
615#if NPMAPS >= KERNEL_SEGMENT / 16
616 usedsr[KERNEL_SEGMENT / 16 / (sizeof usedsr[0] * 8)]
617 |= 1 << ((KERNEL_SEGMENT / 16) % (sizeof usedsr[0] * 8));
|
660 /* Enter the pages? */ 661 for (off = 0; off < translations[i].om_len; off += PAGE_SIZE) { 662 struct vm_page m; 663 664 m.phys_addr = translations[i].om_pa + off; 665 pmap_enter(&ofw_pmap, translations[i].om_va + off, &m, 666 VM_PROT_ALL, 1); 667 } 668 } 669#ifdef SMP 670 TLBSYNC(); |
671#endif 672
|
620#if 0 /* XXX */
|
673 /* 674 * Initialize the kernel pmap (which is statically allocated). 675 */ |
676 for (i = 0; i < 16; i++) { 677 kernel_pmap->pm_sr[i] = EMPTY_SEGMENT;
|
623 __asm __volatile ("mtsrin %0,%1"
624 :: "r"(EMPTY_SEGMENT), "r"(i << ADDR_SR_SHFT));
|
678 }
|
626#endif
|
679 kernel_pmap->pm_sr[KERNEL_SR] = KERNEL_SEGMENT; 680 kernel_pmap->pm_active = ~0; 681 kernel_pmap->pm_count = 1; |
682
|
628 for (i = 0; i < 16; i++) {
629 int j;
|
683 /* 684 * Allocate a kernel stack with a guard page for thread0 and map it 685 * into the kernel page map. 686 */ 687 pa = pmap_bootstrap_alloc(KSTACK_PAGES * PAGE_SIZE, 0); 688 kstack0_phys = pa; 689 kstack0 = virtual_avail + (KSTACK_GUARD_PAGES * PAGE_SIZE); 690 CTR2(KTR_PMAP, "pmap_bootstrap: kstack0 at %#x (%#x)", kstack0_phys, 691 kstack0); 692 virtual_avail += (KSTACK_PAGES + KSTACK_GUARD_PAGES) * PAGE_SIZE; 693 for (i = 0; i < KSTACK_PAGES; i++) { 694 pa = kstack0_phys + i * PAGE_SIZE; 695 va = kstack0 + i * PAGE_SIZE; 696 pmap_kenter(va, pa); 697 TLBIE(va); 698 } |
699
|
631 __asm __volatile ("mfsrin %0,%1"
632 : "=r" (j)
633 : "r" (i << ADDR_SR_SHFT));
|
700 /* 701 * Calculate the first and last available physical addresses. 702 */ 703 avail_start = phys_avail[0]; 704 for (i = 0; phys_avail[i + 2] != 0; i += 2) 705 ; 706 avail_end = phys_avail[i + 1]; 707 Maxmem = powerpc_btop(avail_end); |
708
|
635 kernel_pmap->pm_sr[i] = j;
636 }
|
709 /* 710 * Allocate virtual address space for the message buffer. 711 */ 712 msgbufp = (struct msgbuf *)virtual_avail; 713 virtual_avail += round_page(MSGBUF_SIZE); |
714
|
638 kernel_pmap->pm_sr[KERNEL_SR] = KERNEL_SEGMENT;
|
715 /* 716 * Initialize hardware. 717 */ 718 for (i = 0; i < 16; i++) { 719 __asm __volatile("mtsrin %0,%1" 720 :: "r"(EMPTY_SEGMENT), "r"(i << ADDR_SR_SHFT)); 721 } |
722 __asm __volatile ("mtsr %0,%1"
|
640 :: "n"(KERNEL_SR), "r"(KERNEL_SEGMENT));
641
|
723 :: "n"(KERNEL_SR), "r"(KERNEL_SEGMENT)); |
724 __asm __volatile ("sync; mtsdr1 %0; isync"
|
643 :: "r"((u_int)ptable | (ptab_mask >> 10)));
644
|
725 :: "r"((u_int)pmap_pteg_table | (pmap_pteg_mask >> 10))); |
726 tlbia(); 727
|
647 virtual_avail = VM_MIN_KERNEL_ADDRESS;
648 virtual_end = VM_MAX_KERNEL_ADDRESS;
|
728 pmap_bootstrapped++; |
729} 730 731/*
|
652 * Initialize anything else for pmap handling.
653 * Called during vm_init().
|
732 * Activate a user pmap. The pmap must be activated before it's address 733 * space can be accessed in any way. |
734 */ 735void
|
656pmap_init(vm_offset_t phys_start, vm_offset_t phys_end)
|
736pmap_activate(struct thread *td) |
737{
|
658 int initial_pvs;
|
738 pmap_t pm; 739 int i; |
740 741 /*
|
661 * init the pv free list
|
742 * Load all the data we need up front to encourasge the compiler to 743 * not issue any loads while we have interrupts disabled below. |
744 */
|
663 initial_pvs = vm_page_array_size;
664 if (initial_pvs < MINPV) {
665 initial_pvs = MINPV;
666 }
667 pvzone = &pvzone_store;
668 pvinit = (struct pv_entry *) kmem_alloc(kernel_map,
669 initial_pvs * sizeof(struct pv_entry));
670 zbootinit(pvzone, "PV ENTRY", sizeof(struct pv_entry), pvinit,
671 vm_page_array_size);
|
745 pm = &td->td_proc->p_vmspace->vm_pmap; |
746
|
673 pmap_initialized = TRUE;
674}
|
747 KASSERT(pm->pm_active == 0, ("pmap_activate: pmap already active?")); |
748
|
676/*
677 * Initialize a preallocated and zeroed pmap structure.
678 */
679void
680pmap_pinit(struct pmap *pm)
681{
682 int i, j;
|
749 pm->pm_active |= PCPU_GET(cpumask); |
750 751 /*
|
685 * Allocate some segment registers for this pmap.
|
752 * XXX: Address this again later? |
753 */
|
687 pm->pm_refs = 1;
688 for (i = 0; i < sizeof usedsr / sizeof usedsr[0]; i++) {
689 if (usedsr[i] != 0xffffffff) {
690 j = ffs(~usedsr[i]) - 1;
691 usedsr[i] |= 1 << j;
692 pm->pm_sr[0] = (i * sizeof usedsr[0] * 8 + j) * 16;
693 for (i = 1; i < 16; i++) {
694 pm->pm_sr[i] = pm->pm_sr[i - 1] + 1;
695 }
696 return;
697 }
|
754 critical_enter(); 755 756 for (i = 0; i < 16; i++) { 757 __asm __volatile("mtsr %0,%1" :: "r"(i), "r"(pm->pm_sr[i])); |
758 }
|
699 panic("out of segments");
|
759 __asm __volatile("sync; isync"); 760 761 critical_exit(); |
762} 763
|
702void
703pmap_pinit2(pmap_t pmap)
|
764vm_offset_t 765pmap_addr_hint(vm_object_t object, vm_offset_t va, vm_size_t size) |
766{
|
705
706 /*
707 * Nothing to be done.
708 */
709 return;
|
767 TODO; 768 return (0); |
769} 770
|
712/*
713 * Add a reference to the given pmap.
714 */
|
771void
|
716pmap_reference(struct pmap *pm)
|
772pmap_change_wiring(pmap_t pmap, vm_offset_t va, boolean_t wired) |
773{
|
718
719 pm->pm_refs++;
|
774 TODO; |
775} 776
|
722/*
723 * Retire the given pmap from service.
724 * Should only be called if the map contains no valid mappings.
725 */
|
777void
|
727pmap_destroy(struct pmap *pm)
|
778pmap_clear_modify(vm_page_t m) |
779{ 780
|
730 if (--pm->pm_refs == 0) {
731 pmap_release(pm);
732 free((caddr_t)pm, M_VMPGDATA);
733 }
|
781 if (m->flags * PG_FICTITIOUS) 782 return; 783 pmap_clear_bit(m, PTE_CHG); |
784} 785
|
736/*
737 * Release any resources held by the given physical map.
738 * Called when a pmap initialized by pmap_pinit is being released.
739 */
|
786void
|
741pmap_release(struct pmap *pm)
|
787pmap_collect(void) |
788{
|
743 int i, j;
744
745 if (!pm->pm_sr[0]) {
746 panic("pmap_release");
747 }
748 i = pm->pm_sr[0] / 16;
749 j = i % (sizeof usedsr[0] * 8);
750 i /= sizeof usedsr[0] * 8;
751 usedsr[i] &= ~(1 << j);
|
789 TODO; |
790} 791
|
754/*
755 * Copy the range specified by src_addr/len
756 * from the source map to the range dst_addr/len
757 * in the destination map.
758 *
759 * This routine is only advisory and need not do anything.
760 */
|
792void
|
762pmap_copy(struct pmap *dst_pmap, struct pmap *src_pmap, vm_offset_t dst_addr,
763 vm_size_t len, vm_offset_t src_addr)
|
793pmap_copy(pmap_t dst_pmap, pmap_t src_pmap, vm_offset_t dst_addr, 794 vm_size_t len, vm_offset_t src_addr) |
795{
|
765
766 return;
|
796 TODO; |
797} 798
|
769/*
770 * Garbage collects the physical map system for
771 * pages which are no longer used.
772 * Success need not be guaranteed -- that is, there
773 * may well be pages which are not referenced, but
774 * others may be collected.
775 * Called by the pageout daemon when pages are scarce.
776 */
|
799void
|
778pmap_collect(void)
|
800pmap_copy_page(vm_offset_t src, vm_offset_t dst) |
801{
|
780
781 return;
|
802 TODO; |
803} 804 805/*
|
785 * Fill the given physical page with zeroes.
|
806 * Zero a page of physical memory by temporarily mapping it into the tlb. |
807 */ 808void 809pmap_zero_page(vm_offset_t pa) 810{
|
790#if 0
791 bzero((caddr_t)pa, PAGE_SIZE);
792#else
|
811 caddr_t va; |
812 int i; 813
|
795 for (i = PAGE_SIZE/CACHELINESIZE; i > 0; i--) {
796 __asm __volatile ("dcbz 0,%0" :: "r"(pa));
797 pa += CACHELINESIZE;
|
814 if (pa < SEGMENT_LENGTH) { 815 va = (caddr_t) pa; 816 } else if (pmap_initialized) { 817 if (pmap_pvo_zeropage == NULL) 818 pmap_pvo_zeropage = pmap_rkva_alloc(); 819 pmap_pa_map(pmap_pvo_zeropage, pa, NULL, NULL); 820 va = (caddr_t)PVO_VADDR(pmap_pvo_zeropage); 821 } else { 822 panic("pmap_zero_page: can't zero pa %#x", pa); |
823 }
|
799#endif
|
824 825 bzero(va, PAGE_SIZE); 826 827 for (i = PAGE_SIZE / CACHELINESIZE; i > 0; i--) { 828 __asm __volatile("dcbz 0,%0" :: "r"(va)); 829 va += CACHELINESIZE; 830 } 831 832 if (pa >= SEGMENT_LENGTH) 833 pmap_pa_unmap(pmap_pvo_zeropage, NULL, NULL); |
834} 835 836void 837pmap_zero_page_area(vm_offset_t pa, int off, int size) 838{
|
805
806 bzero((caddr_t)pa + off, size);
|
839 TODO; |
840} 841 842/*
|
810 * Copy the given physical source page to its destination.
|
843 * Map the given physical page at the specified virtual address in the 844 * target pmap with the protection requested. If specified the page 845 * will be wired down. |
846 */ 847void
|
813pmap_copy_page(vm_offset_t src, vm_offset_t dst)
|
848pmap_enter(pmap_t pmap, vm_offset_t va, vm_page_t m, vm_prot_t prot, 849 boolean_t wired) |
850{
|
851 struct pvo_head *pvo_head; 852 vm_zone_t zone; 853 u_int pte_lo, pvo_flags; 854 int error; |
855
|
816 bcopy((caddr_t)src, (caddr_t)dst, PAGE_SIZE);
817}
|
856 if (!pmap_initialized) { 857 pvo_head = &pmap_pvo_kunmanaged; 858 zone = pmap_upvo_zone; 859 pvo_flags = 0; 860 } else { 861 pvo_head = pa_to_pvoh(m->phys_addr); 862 zone = pmap_mpvo_zone; 863 pvo_flags = PVO_MANAGED; 864 } |
865
|
819static struct pv_entry *
820pmap_alloc_pv()
821{
822 pv_entry_count++;
|
866 pte_lo = PTE_I | PTE_G; |
867
|
824 if (pv_entry_high_water &&
825 (pv_entry_count > pv_entry_high_water) &&
826 (pmap_pagedaemon_waken == 0)) {
827 pmap_pagedaemon_waken = 1;
828 wakeup(&vm_pages_needed);
829 }
|
868 if (prot & VM_PROT_WRITE) 869 pte_lo |= PTE_BW; 870 else 871 pte_lo |= PTE_BR; |
872
|
831 return zalloc(pvzone);
832}
|
873 if (prot & VM_PROT_EXECUTE) 874 pvo_flags |= PVO_EXECUTABLE; |
875
|
834static void
835pmap_free_pv(struct pv_entry *pv)
836{
|
876 if (wired) 877 pvo_flags |= PVO_WIRED; |
878
|
838 pv_entry_count--;
839 zfree(pvzone, pv);
840}
|
879 critical_enter(); |
880
|
842/*
843 * We really hope that we don't need overflow entries
844 * before the VM system is initialized!
845 *
846 * XXX: Should really be switched over to the zone allocator.
847 */
848static struct pte_ovfl *
849poalloc()
850{
851 struct po_page *pop;
852 struct pte_ovfl *po;
853 vm_page_t mem;
854 int i;
855
856 if (!pmap_initialized) {
857 panic("poalloc");
858 }
859
860 if (po_nfree == 0) {
|
881 error = pmap_pvo_enter(pmap, zone, pvo_head, va, m->phys_addr, pte_lo, 882 pvo_flags); 883 884 critical_exit(); 885 886 if (error == ENOENT) { |
887 /*
|
862 * Since we cannot use maps for potable allocation,
863 * we have to steal some memory from the VM system. XXX
|
888 * Flush the real memory from the cache. |
889 */
|
865 mem = vm_page_alloc(NULL, 0, VM_ALLOC_SYSTEM);
866 po_pcnt++;
867 pop = (struct po_page *)VM_PAGE_TO_PHYS(mem);
868 pop->pop_pgi.pgi_page = mem;
869 LIST_INIT(&pop->pop_pgi.pgi_freelist);
870 for (i = NPOPPG - 1, po = pop->pop_po + 1; --i >= 0; po++) {
871 LIST_INSERT_HEAD(&pop->pop_pgi.pgi_freelist, po,
872 po_list);
|
890 if ((pvo_flags & PVO_EXECUTABLE) && (pte_lo & PTE_I) == 0) { 891 pmap_syncicache(m->phys_addr, PAGE_SIZE); |
892 }
|
874 po_nfree += pop->pop_pgi.pgi_nfree = NPOPPG - 1;
875 LIST_INSERT_HEAD(&po_page_freelist, pop, pop_pgi.pgi_list);
876 po = pop->pop_po;
877 } else {
878 po_nfree--;
879 pop = po_page_freelist.lh_first;
880 if (--pop->pop_pgi.pgi_nfree <= 0) {
881 LIST_REMOVE(pop, pop_pgi.pgi_list);
882 }
883 po = pop->pop_pgi.pgi_freelist.lh_first;
884 LIST_REMOVE(po, po_list);
|
893 }
|
886
887 return po;
|
894} 895
|
890static void
891pofree(struct pte_ovfl *po, int freepage)
|
896vm_offset_t 897pmap_extract(pmap_t pmap, vm_offset_t va) |
898{
|
893 struct po_page *pop;
894
895 pop = (struct po_page *)trunc_page((vm_offset_t)po);
896 switch (++pop->pop_pgi.pgi_nfree) {
897 case NPOPPG:
898 if (!freepage) {
899 break;
900 }
901 po_nfree -= NPOPPG - 1;
902 po_pcnt--;
903 LIST_REMOVE(pop, pop_pgi.pgi_list);
904 vm_page_free(pop->pop_pgi.pgi_page);
905 return;
906 case 1:
907 LIST_INSERT_HEAD(&po_page_freelist, pop, pop_pgi.pgi_list);
908 default:
909 break;
910 }
911 LIST_INSERT_HEAD(&pop->pop_pgi.pgi_freelist, po, po_list);
912 po_nfree++;
|
899 TODO; 900 return (0); |
901} 902 903/*
|
916 * This returns whether this is the first mapping of a page.
|
904 * Grow the number of kernel page table entries. Unneeded. |
905 */
|
918static int
919pmap_enter_pv(int pteidx, vm_offset_t va, vm_offset_t pa)
|
906void 907pmap_growkernel(vm_offset_t addr) |
908{
|
921 struct pv_entry *pv, *npv;
922 int s, first;
923
924 if (!pmap_initialized) {
925 return 0;
926 }
|
909} |
910
|
928 s = splimp();
|
911void 912pmap_init(vm_offset_t phys_start, vm_offset_t phys_end) 913{ |
914
|
930 pv = pa_to_pv(pa);
931 first = pv->pv_idx;
932 if (pv->pv_idx == -1) {
933 /*
934 * No entries yet, use header as the first entry.
935 */
936 pv->pv_va = va;
937 pv->pv_idx = pteidx;
938 pv->pv_next = NULL;
939 } else {
940 /*
941 * There is at least one other VA mapping this page.
942 * Place this entry after the header.
943 */
944 npv = pmap_alloc_pv();
945 npv->pv_va = va;
946 npv->pv_idx = pteidx;
947 npv->pv_next = pv->pv_next;
948 pv->pv_next = npv;
949 }
950 splx(s);
951 return first;
|
915 CTR(KTR_PMAP, "pmap_init"); |
916} 917
|
954static void
955pmap_remove_pv(int pteidx, vm_offset_t va, vm_offset_t pa, struct pte *pte)
|
918void 919pmap_init2(void) |
920{
|
957 struct pv_entry *pv, *npv;
958 char *attr;
|
921
|
960 /*
961 * First transfer reference/change bits to cache.
962 */
963 attr = pa_to_attr(pa);
964 if (attr == NULL) {
965 return;
966 }
967 *attr |= (pte->pte_lo & (PTE_REF | PTE_CHG)) >> ATTRSHFT;
968
969 /*
970 * Remove from the PV table.
971 */
972 pv = pa_to_pv(pa);
973
974 /*
975 * If it is the first entry on the list, it is actually
976 * in the header and we must copy the following entry up
977 * to the header. Otherwise we must search the list for
978 * the entry. In either case we free the now unused entry.
979 */
980 if (pteidx == pv->pv_idx && va == pv->pv_va) {
981 npv = pv->pv_next;
982 if (npv) {
983 *pv = *npv;
984 pmap_free_pv(npv);
985 } else {
986 pv->pv_idx = -1;
987 }
988 } else {
989 for (; (npv = pv->pv_next); pv = npv) {
990 if (pteidx == npv->pv_idx && va == npv->pv_va) {
991 break;
992 }
993 }
994 if (npv) {
995 pv->pv_next = npv->pv_next;
996 pmap_free_pv(npv);
997 }
998#ifdef DIAGNOSTIC
999 else {
1000 panic("pmap_remove_pv: not on list\n");
1001 }
1002#endif
1003 }
|
922 CTR(KTR_PMAP, "pmap_init2"); 923 zinitna(pmap_upvo_zone, &pmap_upvo_zone_obj, NULL, 0, PMAP_PVO_SIZE, 924 ZONE_INTERRUPT, 1); 925 pmap_mpvo_zone = zinit("managed pvo", sizeof(struct pvo_entry), 926 PMAP_PVO_SIZE, ZONE_INTERRUPT, 1); 927 pmap_initialized = TRUE; |
928} 929
|
930boolean_t 931pmap_is_modified(vm_page_t m) 932{ 933 TODO; 934 return (0); 935} 936 937void 938pmap_clear_reference(vm_page_t m) 939{ 940 TODO; 941} 942 943int 944pmap_ts_referenced(vm_page_t m) 945{ 946 TODO; 947 return (0); 948} 949 |
950/*
|
1007 * Insert physical page at pa into the given pmap at virtual address va.
|
951 * Map a wired page into kernel virtual address space. |
952 */ 953void
|
1010pmap_enter(pmap_t pm, vm_offset_t va, vm_page_t pg, vm_prot_t prot,
1011 boolean_t wired)
|
954pmap_kenter(vm_offset_t va, vm_offset_t pa) |
955{
|
1013 sr_t sr;
1014 int idx, s;
1015 pte_t pte;
1016 struct pte_ovfl *po;
1017 struct mem_region *mp;
1018 vm_offset_t pa;
|
956 u_int pte_lo; 957 int error; 958 int i; |
959
|
1020 pa = VM_PAGE_TO_PHYS(pg) & ~PAGE_MASK;
|
960#if 0 961 if (va < VM_MIN_KERNEL_ADDRESS) 962 panic("pmap_kenter: attempt to enter non-kernel address %#x", 963 va); 964#endif |
965
|
1022 /*
1023 * Have to remove any existing mapping first.
1024 */
1025 pmap_remove(pm, va, va + PAGE_SIZE);
1026
1027 /*
1028 * Compute the HTAB index.
1029 */
1030 idx = pteidx(sr = ptesr(pm->pm_sr, va), va);
1031 /*
1032 * Construct the PTE.
1033 *
1034 * Note: Don't set the valid bit for correct operation of tlb update.
1035 */
1036 pte.pte_hi = ((sr & SR_VSID) << PTE_VSID_SHFT)
1037 | ((va & ADDR_PIDX) >> ADDR_API_SHFT);
1038 pte.pte_lo = (pa & PTE_RPGN) | PTE_M | PTE_I | PTE_G;
1039
1040 for (mp = mem; mp->size; mp++) {
1041 if (pa >= mp->start && pa < mp->start + mp->size) {
1042 pte.pte_lo &= ~(PTE_I | PTE_G);
|
966 pte_lo = PTE_I | PTE_G | PTE_BW; 967 for (i = 0; phys_avail[i + 2] != 0; i += 2) { 968 if (pa >= phys_avail[i] && pa < phys_avail[i + 1]) { 969 pte_lo &= ~(PTE_I | PTE_G); |
970 break; 971 } 972 }
|
1046 if (prot & VM_PROT_WRITE) {
1047 pte.pte_lo |= PTE_RW;
1048 } else {
1049 pte.pte_lo |= PTE_RO;
1050 }
|
973
|
1052 /*
1053 * Now record mapping for later back-translation.
1054 */
1055 if (pmap_initialized && (pg->flags & PG_FICTITIOUS) == 0) {
1056 if (pmap_enter_pv(idx, va, pa)) {
1057 /*
1058 * Flush the real memory from the cache.
1059 */
1060 __syncicache((void *)pa, PAGE_SIZE);
1061 }
1062 }
|
974 critical_enter(); |
975
|
1064 s = splimp();
1065 pm->pm_stats.resident_count++;
1066 /*
1067 * Try to insert directly into HTAB.
1068 */
1069 if (pte_insert(idx, &pte)) {
1070 splx(s);
1071 return;
1072 }
|
976 error = pmap_pvo_enter(kernel_pmap, pmap_upvo_zone, 977 &pmap_pvo_kunmanaged, va, pa, pte_lo, PVO_WIRED); |
978
|
979 critical_exit(); 980 981 if (error != 0 && error != ENOENT) 982 panic("pmap_kenter: failed to enter va %#x pa %#x: %d", va, 983 pa, error); 984 |
985 /*
|
1075 * Have to allocate overflow entry.
1076 *
1077 * Note, that we must use real addresses for these.
|
986 * Flush the real memory from the instruction cache. |
987 */
|
1079 po = poalloc();
1080 po->po_pte = pte;
1081 LIST_INSERT_HEAD(potable + idx, po, po_list);
1082 splx(s);
|
988 if ((pte_lo & (PTE_I | PTE_G)) == 0) { 989 pmap_syncicache(pa, PAGE_SIZE); 990 } |
991} 992
|
1085void
1086pmap_kenter(vm_offset_t va, vm_offset_t pa)
|
993vm_offset_t 994pmap_kextract(vm_offset_t va) |
995{
|
1088 struct vm_page pg;
1089
1090 pg.phys_addr = pa;
1091 pmap_enter(kernel_pmap, va, &pg, VM_PROT_READ|VM_PROT_WRITE, TRUE);
|
996 TODO; 997 return (0); |
998} 999 1000void 1001pmap_kremove(vm_offset_t va) 1002{
|
1097 pmap_remove(kernel_pmap, va, va + PAGE_SIZE);
|
1003 TODO; |
1004} 1005 1006/*
|
1101 * Remove the given range of mapping entries.
|
1007 * Map a range of physical addresses into kernel virtual address space. 1008 * 1009 * The value passed in *virt is a suggested virtual address for the mapping. 1010 * Architectures which can support a direct-mapped physical to virtual region 1011 * can return the appropriate address within that region, leaving '*virt' 1012 * unchanged. We cannot and therefore do not; *virt is updated with the 1013 * first usable address after the mapped region. |
1014 */
|
1103void
1104pmap_remove(struct pmap *pm, vm_offset_t va, vm_offset_t endva)
|
1015vm_offset_t 1016pmap_map(vm_offset_t *virt, vm_offset_t pa_start, vm_offset_t pa_end, int prot) |
1017{
|
1106 int idx, i, s;
1107 sr_t sr;
1108 pte_t *ptp;
1109 struct pte_ovfl *po, *npo;
|
1018 vm_offset_t sva, va; |
1019
|
1111 s = splimp();
1112 while (va < endva) {
1113 idx = pteidx(sr = ptesr(pm->pm_sr, va), va);
1114 for (ptp = ptable + idx * 8, i = 8; --i >= 0; ptp++) {
1115 if (ptematch(ptp, sr, va, PTE_VALID)) {
1116 pmap_remove_pv(idx, va, ptp->pte_lo, ptp);
1117 ptp->pte_hi &= ~PTE_VALID;
1118 __asm __volatile ("sync");
1119 tlbie(va);
1120 tlbsync();
1121 pm->pm_stats.resident_count--;
1122 }
1123 }
1124 for (ptp = ptable + (idx ^ ptab_mask) * 8, i = 8; --i >= 0;
1125 ptp++) {
1126 if (ptematch(ptp, sr, va, PTE_VALID | PTE_HID)) {
1127 pmap_remove_pv(idx, va, ptp->pte_lo, ptp);
1128 ptp->pte_hi &= ~PTE_VALID;
1129 __asm __volatile ("sync");
1130 tlbie(va);
1131 tlbsync();
1132 pm->pm_stats.resident_count--;
1133 }
1134 }
1135 for (po = potable[idx].lh_first; po; po = npo) {
1136 npo = po->po_list.le_next;
1137 if (ptematch(&po->po_pte, sr, va, 0)) {
1138 pmap_remove_pv(idx, va, po->po_pte.pte_lo,
1139 &po->po_pte);
1140 LIST_REMOVE(po, po_list);
1141 pofree(po, 1);
1142 pm->pm_stats.resident_count--;
1143 }
1144 }
1145 va += PAGE_SIZE;
1146 }
1147 splx(s);
|
1020 sva = *virt; 1021 va = sva; 1022 for (; pa_start < pa_end; pa_start += PAGE_SIZE, va += PAGE_SIZE) 1023 pmap_kenter(va, pa_start); 1024 *virt = va; 1025 return (sva); |
1026} 1027
|
1150static pte_t *
1151pte_find(struct pmap *pm, vm_offset_t va)
|
1028int 1029pmap_mincore(pmap_t pmap, vm_offset_t addr) |
1030{
|
1153 int idx, i;
1154 sr_t sr;
1155 pte_t *ptp;
1156 struct pte_ovfl *po;
1157
1158 idx = pteidx(sr = ptesr(pm->pm_sr, va), va);
1159 for (ptp = ptable + idx * 8, i = 8; --i >= 0; ptp++) {
1160 if (ptematch(ptp, sr, va, PTE_VALID)) {
1161 return ptp;
1162 }
1163 }
1164 for (ptp = ptable + (idx ^ ptab_mask) * 8, i = 8; --i >= 0; ptp++) {
1165 if (ptematch(ptp, sr, va, PTE_VALID | PTE_HID)) {
1166 return ptp;
1167 }
1168 }
1169 for (po = potable[idx].lh_first; po; po = po->po_list.le_next) {
1170 if (ptematch(&po->po_pte, sr, va, 0)) {
1171 return &po->po_pte;
1172 }
1173 }
1174 return 0;
|
1031 TODO; 1032 return (0); |
1033} 1034
|
1177/*
1178 * Get the physical page address for the given pmap/virtual address.
|
1035/* 1036 * Create the uarea for a new process. 1037 * This routine directly affects the fork perf for a process. |
1038 */
|
1180vm_offset_t
1181pmap_extract(pmap_t pm, vm_offset_t va)
1182{
1183 pte_t *ptp;
1184 int s;
1185
1186 s = splimp();
1187
1188 if (!(ptp = pte_find(pm, va))) {
1189 splx(s);
1190 return (0);
1191 }
1192 splx(s);
1193 return ((ptp->pte_lo & PTE_RPGN) | (va & ADDR_POFF));
1194}
1195
1196/*
1197 * Lower the protection on the specified range of this pmap.
1198 *
1199 * There are only two cases: either the protection is going to 0,
1200 * or it is going to read-only.
1201 */
|
1039void
|
1203pmap_protect(struct pmap *pm, vm_offset_t sva, vm_offset_t eva, vm_prot_t prot)
|
1040pmap_new_proc(struct proc *p) |
1041{
|
1205 pte_t *ptp;
1206 int valid, s;
1207
1208 if (prot & VM_PROT_READ) {
1209 s = splimp();
1210 while (sva < eva) {
1211 ptp = pte_find(pm, sva);
1212 if (ptp) {
1213 valid = ptp->pte_hi & PTE_VALID;
1214 ptp->pte_hi &= ~PTE_VALID;
1215 __asm __volatile ("sync");
1216 tlbie(sva);
1217 tlbsync();
1218 ptp->pte_lo &= ~PTE_PP;
1219 ptp->pte_lo |= PTE_RO;
1220 __asm __volatile ("sync");
1221 ptp->pte_hi |= valid;
1222 }
1223 sva += PAGE_SIZE;
1224 }
1225 splx(s);
1226 return;
1227 }
1228 pmap_remove(pm, sva, eva);
1229}
|
1042 vm_object_t upobj; 1043 vm_offset_t up; 1044 vm_page_t m; 1045 u_int i; |
1046
|
1231boolean_t
1232ptemodify(vm_page_t pg, u_int mask, u_int val)
1233{
1234 vm_offset_t pa;
1235 struct pv_entry *pv;
1236 pte_t *ptp;
1237 struct pte_ovfl *po;
1238 int i, s;
1239 char *attr;
1240 int rv;
1241
1242 pa = VM_PAGE_TO_PHYS(pg);
1243
|
1047 /*
|
1245 * First modify bits in cache.
|
1048 * Allocate the object for the upages. |
1049 */
|
1247 attr = pa_to_attr(pa);
1248 if (attr == NULL) {
1249 return FALSE;
|
1050 upobj = p->p_upages_obj; 1051 if (upobj == NULL) { 1052 upobj = vm_object_allocate(OBJT_DEFAULT, UAREA_PAGES); 1053 p->p_upages_obj = upobj; |
1054 } 1055
|
1252 *attr &= ~mask >> ATTRSHFT;
1253 *attr |= val >> ATTRSHFT;
1254
1255 pv = pa_to_pv(pa);
1256 if (pv->pv_idx < 0) {
1257 return FALSE;
|
1056 /* 1057 * Get a kernel virtual address for the uarea for this process. 1058 */ 1059 up = (vm_offset_t)p->p_uarea; 1060 if (up == 0) { 1061 up = kmem_alloc_nofault(kernel_map, UAREA_PAGES * PAGE_SIZE); 1062 if (up == 0) 1063 panic("pmap_new_proc: upage allocation failed"); 1064 p->p_uarea = (struct user *)up; |
1065 } 1066
|
1260 rv = FALSE;
1261 s = splimp();
1262 for (; pv; pv = pv->pv_next) {
1263 for (ptp = ptable + pv->pv_idx * 8, i = 8; --i >= 0; ptp++) {
1264 if ((ptp->pte_hi & PTE_VALID)
1265 && (ptp->pte_lo & PTE_RPGN) == pa) {
1266 ptp->pte_hi &= ~PTE_VALID;
1267 __asm __volatile ("sync");
1268 tlbie(pv->pv_va);
1269 tlbsync();
1270 rv |= ptp->pte_lo & mask;
1271 ptp->pte_lo &= ~mask;
1272 ptp->pte_lo |= val;
1273 __asm __volatile ("sync");
1274 ptp->pte_hi |= PTE_VALID;
1275 }
1276 }
1277 for (ptp = ptable + (pv->pv_idx ^ ptab_mask) * 8, i = 8;
1278 --i >= 0; ptp++) {
1279 if ((ptp->pte_hi & PTE_VALID)
1280 && (ptp->pte_lo & PTE_RPGN) == pa) {
1281 ptp->pte_hi &= ~PTE_VALID;
1282 __asm __volatile ("sync");
1283 tlbie(pv->pv_va);
1284 tlbsync();
1285 rv |= ptp->pte_lo & mask;
1286 ptp->pte_lo &= ~mask;
1287 ptp->pte_lo |= val;
1288 __asm __volatile ("sync");
1289 ptp->pte_hi |= PTE_VALID;
1290 }
1291 }
1292 for (po = potable[pv->pv_idx].lh_first; po;
1293 po = po->po_list.le_next) {
1294 if ((po->po_pte.pte_lo & PTE_RPGN) == pa) {
1295 rv |= ptp->pte_lo & mask;
1296 po->po_pte.pte_lo &= ~mask;
1297 po->po_pte.pte_lo |= val;
1298 }
1299 }
1300 }
1301 splx(s);
1302 return rv != 0;
1303}
|
1067 for (i = 0; i < UAREA_PAGES; i++) { 1068 /* 1069 * Get a uarea page. 1070 */ 1071 m = vm_page_grab(upobj, i, VM_ALLOC_NORMAL | VM_ALLOC_RETRY); |
1072
|
1305int
1306ptebits(vm_page_t pg, int bit)
1307{
1308 struct pv_entry *pv;
1309 pte_t *ptp;
1310 struct pte_ovfl *po;
1311 int i, s, bits;
1312 char *attr;
1313 vm_offset_t pa;
|
1073 /* 1074 * Wire the page. 1075 */ 1076 m->wire_count++; |
1077
|
1315 bits = 0;
1316 pa = VM_PAGE_TO_PHYS(pg);
|
1078 /* 1079 * Enter the page into the kernel address space. 1080 */ 1081 pmap_kenter(up + i * PAGE_SIZE, VM_PAGE_TO_PHYS(m)); |
1082
|
1318 /*
1319 * First try the cache.
1320 */
1321 attr = pa_to_attr(pa);
1322 if (attr == NULL) {
1323 return 0;
|
1083 vm_page_wakeup(m); 1084 vm_page_flag_clear(m, PG_ZERO); 1085 vm_page_flag_set(m, PG_MAPPED | PG_WRITEABLE); 1086 m->valid = VM_PAGE_BITS_ALL; |
1087 }
|
1325 bits |= (*attr << ATTRSHFT) & bit;
1326 if (bits == bit) {
1327 return bits;
1328 }
|
1088} |
1089
|
1330 pv = pa_to_pv(pa);
1331 if (pv->pv_idx < 0) {
1332 return 0;
1333 }
1334
1335 s = splimp();
1336 for (; pv; pv = pv->pv_next) {
1337 for (ptp = ptable + pv->pv_idx * 8, i = 8; --i >= 0; ptp++) {
1338 if ((ptp->pte_hi & PTE_VALID)
1339 && (ptp->pte_lo & PTE_RPGN) == pa) {
1340 bits |= ptp->pte_lo & bit;
1341 if (bits == bit) {
1342 splx(s);
1343 return bits;
1344 }
1345 }
1346 }
1347 for (ptp = ptable + (pv->pv_idx ^ ptab_mask) * 8, i = 8;
1348 --i >= 0; ptp++) {
1349 if ((ptp->pte_hi & PTE_VALID)
1350 && (ptp->pte_lo & PTE_RPGN) == pa) {
1351 bits |= ptp->pte_lo & bit;
1352 if (bits == bit) {
1353 splx(s);
1354 return bits;
1355 }
1356 }
1357 }
1358 for (po = potable[pv->pv_idx].lh_first; po;
1359 po = po->po_list.le_next) {
1360 if ((po->po_pte.pte_lo & PTE_RPGN) == pa) {
1361 bits |= po->po_pte.pte_lo & bit;
1362 if (bits == bit) {
1363 splx(s);
1364 return bits;
1365 }
1366 }
1367 }
1368 }
1369 splx(s);
1370 return bits;
|
1090void 1091pmap_object_init_pt(pmap_t pmap, vm_offset_t addr, vm_object_t object, 1092 vm_pindex_t pindex, vm_size_t size, int limit) 1093{ 1094 TODO; |
1095} 1096 1097/*
|
1374 * Lower the protection on the specified physical page.
1375 *
1376 * There are only two cases: either the protection is going to 0,
1377 * or it is going to read-only.
|
1098 * Lower the permission for all mappings to a given page. |
1099 */ 1100void 1101pmap_page_protect(vm_page_t m, vm_prot_t prot) 1102{
|
1382 vm_offset_t pa;
1383 vm_offset_t va;
1384 pte_t *ptp;
1385 struct pte_ovfl *po, *npo;
1386 int i, s, idx;
1387 struct pv_entry *pv;
|
1103 struct pvo_head *pvo_head; 1104 struct pvo_entry *pvo, *next_pvo; 1105 struct pte *pt; |
1106
|
1389 pa = VM_PAGE_TO_PHYS(m);
1390
1391 pa &= ~ADDR_POFF;
1392 if (prot & VM_PROT_READ) {
1393 ptemodify(m, PTE_PP, PTE_RO);
|
1107 /* 1108 * Since the routine only downgrades protection, if the 1109 * maximal protection is desired, there isn't any change 1110 * to be made. 1111 */ 1112 if ((prot & (VM_PROT_READ|VM_PROT_WRITE)) == 1113 (VM_PROT_READ|VM_PROT_WRITE)) |
1114 return;
|
1395 }
|
1115
|
1397 pv = pa_to_pv(pa);
1398 if (pv == NULL) {
1399 return;
1400 }
|
1116 critical_enter(); |
1117
|
1402 s = splimp();
1403 while (pv->pv_idx >= 0) {
1404 idx = pv->pv_idx;
1405 va = pv->pv_va;
1406 for (ptp = ptable + idx * 8, i = 8; --i >= 0; ptp++) {
1407 if ((ptp->pte_hi & PTE_VALID)
1408 && (ptp->pte_lo & PTE_RPGN) == pa) {
1409 pmap_remove_pv(idx, va, pa, ptp);
1410 ptp->pte_hi &= ~PTE_VALID;
1411 __asm __volatile ("sync");
1412 tlbie(va);
1413 tlbsync();
1414 goto next;
1415 }
|
1118 pvo_head = vm_page_to_pvoh(m); 1119 for (pvo = LIST_FIRST(pvo_head); pvo != NULL; pvo = next_pvo) { 1120 next_pvo = LIST_NEXT(pvo, pvo_vlink); 1121 PMAP_PVO_CHECK(pvo); /* sanity check */ 1122 1123 /* 1124 * Downgrading to no mapping at all, we just remove the entry. 1125 */ 1126 if ((prot & VM_PROT_READ) == 0) { 1127 pmap_pvo_remove(pvo, -1); 1128 continue; |
1129 }
|
1417 for (ptp = ptable + (idx ^ ptab_mask) * 8, i = 8; --i >= 0;
1418 ptp++) {
1419 if ((ptp->pte_hi & PTE_VALID)
1420 && (ptp->pte_lo & PTE_RPGN) == pa) {
1421 pmap_remove_pv(idx, va, pa, ptp);
1422 ptp->pte_hi &= ~PTE_VALID;
1423 __asm __volatile ("sync");
1424 tlbie(va);
1425 tlbsync();
1426 goto next;
1427 }
|
1130 1131 /* 1132 * If EXEC permission is being revoked, just clear the flag 1133 * in the PVO. 1134 */ 1135 if ((prot & VM_PROT_EXECUTE) == 0) 1136 pvo->pvo_vaddr &= ~PVO_EXECUTABLE; 1137 1138 /* 1139 * If this entry is already RO, don't diddle with the page 1140 * table. 1141 */ 1142 if ((pvo->pvo_pte.pte_lo & PTE_PP) == PTE_BR) { 1143 PMAP_PVO_CHECK(pvo); 1144 continue; |
1145 }
|
1429 for (po = potable[idx].lh_first; po; po = npo) {
1430 npo = po->po_list.le_next;
1431 if ((po->po_pte.pte_lo & PTE_RPGN) == pa) {
1432 pmap_remove_pv(idx, va, pa, &po->po_pte);
1433 LIST_REMOVE(po, po_list);
1434 pofree(po, 1);
1435 goto next;
1436 }
1437 }
1438next:
|
1146 1147 /* 1148 * Grab the PTE before we diddle the bits so pvo_to_pte can 1149 * verify the pte contents are as expected. 1150 */ 1151 pt = pmap_pvo_to_pte(pvo, -1); 1152 pvo->pvo_pte.pte_lo &= ~PTE_PP; 1153 pvo->pvo_pte.pte_lo |= PTE_BR; 1154 if (pt != NULL) 1155 pmap_pte_change(pt, &pvo->pvo_pte, pvo->pvo_vaddr); 1156 PMAP_PVO_CHECK(pvo); /* sanity check */ |
1157 }
|
1440 splx(s);
|
1158 1159 critical_exit(); |
1160} 1161 1162/*
|
1444 * Activate the address space for the specified process. If the process
1445 * is the current process, load the new MMU context.
|
1163 * Make the specified page pageable (or not). Unneeded. |
1164 */ 1165void
|
1448pmap_activate(struct thread *td)
|
1166pmap_pageable(pmap_t pmap, vm_offset_t sva, vm_offset_t eva, 1167 boolean_t pageable) |
1168{
|
1450 struct pcb *pcb;
1451 pmap_t pmap;
1452 pmap_t rpm;
1453 int psl, i, ksr, seg;
|
1169} |
1170
|
1455 pcb = td->td_pcb;
1456 pmap = vmspace_pmap(td->td_proc->p_vmspace);
|
1171boolean_t 1172pmap_page_exists(pmap_t pmap, vm_page_t m) 1173{ 1174 TODO; 1175 return (0); 1176} |
1177
|
1458 /*
1459 * XXX Normally performed in cpu_fork().
1460 */
1461 if (pcb->pcb_pm != pmap) {
1462 pcb->pcb_pm = pmap;
1463 (vm_offset_t) pcb->pcb_pmreal = pmap_extract(kernel_pmap,
1464 (vm_offset_t)pcb->pcb_pm);
1465 }
|
1178static u_int pmap_vsidcontext; |
1179
|
1467 if (td == curthread) {
1468 /* Disable interrupts while switching. */
1469 psl = mfmsr();
1470 mtmsr(psl & ~PSL_EE);
|
1180void 1181pmap_pinit(pmap_t pmap) 1182{ 1183 int i, mask; 1184 u_int entropy; |
1185
|
1472#if 0 /* XXX */
1473 /* Store pointer to new current pmap. */
1474 curpm = pcb->pcb_pmreal;
1475#endif
|
1186 entropy = 0; 1187 __asm __volatile("mftb %0" : "=r"(entropy)); |
1188
|
1477 /* Save kernel SR. */
1478 __asm __volatile("mfsr %0,14" : "=r"(ksr) :);
|
1189 /* 1190 * Allocate some segment registers for this pmap. 1191 */ 1192 pmap->pm_count = 1; 1193 for (i = 0; i < NPMAPS; i += VSID_NBPW) { 1194 u_int hash, n; |
1195 1196 /*
|
1481 * Set new segment registers. We use the pmap's real
1482 * address to avoid accessibility problems.
|
1197 * Create a new value by mutiplying by a prime and adding in 1198 * entropy from the timebase register. This is to make the 1199 * VSID more random so that the PT hash function collides 1200 * less often. (Note that the prime casues gcc to do shifts 1201 * instead of a multiply.) |
1202 */
|
1484 rpm = pcb->pcb_pmreal;
1485 for (i = 0; i < 16; i++) {
1486 seg = rpm->pm_sr[i];
1487 __asm __volatile("mtsrin %0,%1"
1488 :: "r"(seg), "r"(i << ADDR_SR_SHFT));
|
1203 pmap_vsidcontext = (pmap_vsidcontext * 0x1105) + entropy; 1204 hash = pmap_vsidcontext & (NPMAPS - 1); 1205 if (hash == 0) /* 0 is special, avoid it */ 1206 continue; 1207 n = hash >> 5; 1208 mask = 1 << (hash & (VSID_NBPW - 1)); 1209 hash = (pmap_vsidcontext & 0xfffff); 1210 if (pmap_vsid_bitmap[n] & mask) { /* collision? */ 1211 /* anything free in this bucket? */ 1212 if (pmap_vsid_bitmap[n] == 0xffffffff) { 1213 entropy = (pmap_vsidcontext >> 20); 1214 continue; 1215 } 1216 i = ffs(~pmap_vsid_bitmap[i]) - 1; 1217 mask = 1 << i; 1218 hash &= 0xfffff & ~(VSID_NBPW - 1); 1219 hash |= i; |
1220 }
|
1490
1491 /* Restore kernel SR. */
1492 __asm __volatile("mtsr 14,%0" :: "r"(ksr));
1493
1494 /* Interrupts are OK again. */
1495 mtmsr(psl);
|
1221 pmap_vsid_bitmap[n] |= mask; 1222 for (i = 0; i < 16; i++) 1223 pmap->pm_sr[i] = VSID_MAKE(i, hash); 1224 return; |
1225 }
|
1226 1227 panic("pmap_pinit: out of segments"); |
1228} 1229 1230/*
|
1500 * Add a list of wired pages to the kva
1501 * this routine is only used for temporary
1502 * kernel mappings that do not need to have
1503 * page modification or references recorded.
1504 * Note that old mappings are simply written
1505 * over. The page *must* be wired.
|
1231 * Initialize the pmap associated with process 0. |
1232 */ 1233void
|
1508pmap_qenter(vm_offset_t va, vm_page_t *m, int count)
|
1234pmap_pinit0(pmap_t pm) |
1235{
|
1510 int i;
|
1236
|
1512 for (i = 0; i < count; i++) {
1513 vm_offset_t tva = va + i * PAGE_SIZE;
1514 pmap_kenter(tva, VM_PAGE_TO_PHYS(m[i]));
1515 }
|
1237 pmap_pinit(pm); 1238 bzero(&pm->pm_stats, sizeof(pm->pm_stats)); |
1239} 1240
|
1518/*
1519 * this routine jerks page mappings from the
1520 * kernel -- it is meant only for temporary mappings.
1521 */
|
1241void
|
1523pmap_qremove(vm_offset_t va, int count)
|
1242pmap_pinit2(pmap_t pmap) |
1243{
|
1525 vm_offset_t end_va;
|
1244 /* XXX: Remove this stub when no longer called */ 1245} |
1246
|
1527 end_va = va + count*PAGE_SIZE;
1528
1529 while (va < end_va) {
1530 unsigned *pte;
1531
1532 pte = (unsigned *)vtopte(va);
1533 *pte = 0;
1534 tlbie(va);
1535 va += PAGE_SIZE;
1536 }
|
1247void 1248pmap_prefault(pmap_t pmap, vm_offset_t va, vm_map_entry_t entry) 1249{ 1250 TODO; |
1251} 1252
|
1539/*
1540 * pmap_ts_referenced:
1541 *
1542 * Return the count of reference bits for a page, clearing all of them.
1543 */
1544int
1545pmap_ts_referenced(vm_page_t m)
|
1253void 1254pmap_protect(pmap_t pmap, vm_offset_t sva, vm_offset_t eva, vm_prot_t prot) |
1255{
|
1256 TODO; 1257} |
1258
|
1548 /* XXX: coming soon... */
|
1259vm_offset_t 1260pmap_phys_address(int ppn) 1261{ 1262 TODO; |
1263 return (0); 1264} 1265
|
1552/*
1553 * this routine returns true if a physical page resides
1554 * in the given pmap.
1555 */
1556boolean_t
1557pmap_page_exists(pmap_t pmap, vm_page_t m)
|
1266void 1267pmap_qenter(vm_offset_t va, vm_page_t *m, int count) |
1268{
|
1559#if 0 /* XXX: This must go! */
1560 register pv_entry_t pv;
1561 int s;
|
1269 int i; |
1270
|
1563 if (!pmap_initialized || (m->flags & PG_FICTITIOUS))
1564 return FALSE;
|
1271 for (i = 0; i < count; i++, va += PAGE_SIZE) 1272 pmap_kenter(va, VM_PAGE_TO_PHYS(m[i])); 1273} |
1274
|
1566 s = splvm();
1567
1568 /*
1569 * Not found, check current mappings returning immediately if found.
1570 */
1571 for (pv = pv_table; pv; pv = pv->pv_next) {
1572 if (pv->pv_pmap == pmap) {
1573 splx(s);
1574 return TRUE;
1575 }
1576 }
1577 splx(s);
1578#endif
1579 return (FALSE);
|
1275void 1276pmap_qremove(vm_offset_t va, int count) 1277{ 1278 TODO; |
1279} 1280 1281/*
|
1583 * Used to map a range of physical addresses into kernel
1584 * virtual address space.
1585 *
1586 * For now, VM is already on, we only need to map the
1587 * specified memory.
|
1282 * Add a reference to the specified pmap. |
1283 */
|
1589vm_offset_t
1590pmap_map(vm_offset_t *virt, vm_offset_t start, vm_offset_t end, int prot)
|
1284void 1285pmap_reference(pmap_t pm) |
1286{
|
1592 vm_offset_t sva, va;
|
1287
|
1594 sva = *virt;
1595 va = sva;
1596
1597 while (start < end) {
1598 pmap_kenter(va, start);
1599 va += PAGE_SIZE;
1600 start += PAGE_SIZE;
1601 }
1602
1603 *virt = va;
1604 return (sva);
|
1288 if (pm != NULL) 1289 pm->pm_count++; |
1290} 1291
|
1607vm_offset_t
1608pmap_addr_hint(vm_object_t obj, vm_offset_t addr, vm_size_t size)
|
1292void 1293pmap_release(pmap_t pmap) |
1294{
|
1610
1611 return (addr);
|
1295 TODO; |
1296} 1297
|
1614int
1615pmap_mincore(pmap_t pmap, vm_offset_t addr)
|
1298void 1299pmap_remove(pmap_t pmap, vm_offset_t sva, vm_offset_t eva) |
1300{
|
1617
1618 /* XXX: coming soon... */
1619 return (0);
|
1301 TODO; |
1302} 1303 1304void
|
1623pmap_object_init_pt(pmap_t pmap, vm_offset_t addr, vm_object_t object,
1624 vm_pindex_t pindex, vm_size_t size, int limit)
|
1305pmap_remove_pages(pmap_t pmap, vm_offset_t sva, vm_offset_t eva) |
1306{
|
1626
1627 /* XXX: coming soon... */
1628 return;
|
1307 TODO; |
1308} 1309 1310void
|
1632pmap_growkernel(vm_offset_t addr)
|
1311pmap_swapin_proc(struct proc *p) |
1312{
|
1313 TODO; 1314} |
1315
|
1635 /* XXX: coming soon... */
1636 return;
|
1316void 1317pmap_swapout_proc(struct proc *p) 1318{ 1319 TODO; |
1320} 1321 1322/*
|
1640 * Initialize the address space (zone) for the pv_entries. Set a
1641 * high water mark so that the system can recover from excessive
1642 * numbers of pv entries.
|
1323 * Create the kernel stack and pcb for a new thread. 1324 * This routine directly affects the fork perf for a process and 1325 * create performance for a thread. |
1326 */ 1327void
|
1645pmap_init2()
|
1328pmap_new_thread(struct thread *td) |
1329{
|
1647 int shpgperproc = PMAP_SHPGPERPROC;
|
1330 vm_object_t ksobj; 1331 vm_offset_t ks; 1332 vm_page_t m; 1333 u_int i; |
1334
|
1649 TUNABLE_INT_FETCH("vm.pmap.shpgperproc", &shpgperproc);
1650 pv_entry_max = shpgperproc * maxproc + vm_page_array_size;
1651 pv_entry_high_water = 9 * (pv_entry_max / 10);
1652 zinitna(pvzone, &pvzone_obj, NULL, 0, pv_entry_max, ZONE_INTERRUPT, 1);
|
1335 /* 1336 * Allocate object for the kstack. 1337 */ 1338 ksobj = td->td_kstack_obj; 1339 if (ksobj == NULL) { 1340 ksobj = vm_object_allocate(OBJT_DEFAULT, KSTACK_PAGES); 1341 td->td_kstack_obj = ksobj; 1342 } 1343 1344 /* 1345 * Get a kernel virtual address for the kstack for this thread. 1346 */ 1347 ks = td->td_kstack; 1348 if (ks == 0) { 1349 ks = kmem_alloc_nofault(kernel_map, 1350 (KSTACK_PAGES + KSTACK_GUARD_PAGES) * PAGE_SIZE); 1351 if (ks == 0) 1352 panic("pmap_new_thread: kstack allocation failed"); 1353 TLBIE(ks); 1354 ks += KSTACK_GUARD_PAGES * PAGE_SIZE; 1355 td->td_kstack = ks; 1356 } 1357 1358 for (i = 0; i < KSTACK_PAGES; i++) { 1359 /* 1360 * Get a kernel stack page. 1361 */ 1362 m = vm_page_grab(ksobj, i, VM_ALLOC_NORMAL | VM_ALLOC_RETRY); 1363 1364 /* 1365 * Wire the page. 1366 */ 1367 m->wire_count++; 1368 1369 /* 1370 * Enter the page into the kernel address space. 1371 */ 1372 pmap_kenter(ks + i * PAGE_SIZE, VM_PAGE_TO_PHYS(m)); 1373 1374 vm_page_wakeup(m); 1375 vm_page_flag_clear(m, PG_ZERO); 1376 vm_page_flag_set(m, PG_MAPPED | PG_WRITEABLE); 1377 m->valid = VM_PAGE_BITS_ALL; 1378 } |
1379} 1380 1381void
|
1656pmap_swapin_proc(struct proc *p)
|
1382pmap_dispose_proc(struct proc *p) |
1383{
|
1658
1659 /* XXX: coming soon... */
1660 return;
|
1384 TODO; |
1385} 1386 1387void
|
1664pmap_swapout_proc(struct proc *p)
|
1388pmap_dispose_thread(struct thread *td) |
1389{
|
1666
1667 /* XXX: coming soon... */
1668 return;
|
1390 TODO; |
1391} 1392
|
1671
1672/*
1673 * Create the kernel stack (including pcb for i386) for a new thread.
1674 * This routine directly affects the fork perf for a process and
1675 * create performance for a thread.
1676 */
|
1393void
|
1678pmap_new_thread(td)
1679 struct thread *td;
|
1394pmap_swapin_thread(struct thread *td) |
1395{
|
1681 /* XXX: coming soon... */
1682 return;
|
1396 TODO; |
1397} 1398
|
1685/*
1686 * Dispose the kernel stack for a thread that has exited.
1687 * This routine directly impacts the exit perf of a process and thread.
1688 */
|
1399void
|
1690pmap_dispose_thread(td)
1691 struct thread *td;
|
1400pmap_swapout_thread(struct thread *td) |
1401{
|
1693 /* XXX: coming soon... */
1694 return;
|
1402 TODO; |
1403} 1404 1405/*
|
1698 * Allow the Kernel stack for a thread to be prejudicially paged out.
|
1406 * Allocate a physical page of memory directly from the phys_avail map. 1407 * Can only be called from pmap_bootstrap before avail start and end are 1408 * calculated. |
1409 */
|
1700void
1701pmap_swapout_thread(td)
1702 struct thread *td;
|
1410static vm_offset_t 1411pmap_bootstrap_alloc(vm_size_t size, u_int align) |
1412{
|
1704 int i;
1705 vm_object_t ksobj;
1706 vm_offset_t ks;
1707 vm_page_t m;
|
1413 vm_offset_t s, e; 1414 int i, j; |
1415
|
1709 ksobj = td->td_kstack_obj;
1710 ks = td->td_kstack;
1711 for (i = 0; i < KSTACK_PAGES; i++) {
1712 m = vm_page_lookup(ksobj, i);
1713 if (m == NULL)
1714 panic("pmap_swapout_thread: kstack already missing?");
1715 vm_page_dirty(m);
1716 vm_page_unwire(m, 0);
1717 pmap_kremove(ks + i * PAGE_SIZE);
|
1416 size = round_page(size); 1417 for (i = 0; phys_avail[i + 1] != 0; i += 2) { 1418 if (align != 0) 1419 s = (phys_avail[i] + align - 1) & ~(align - 1); 1420 else 1421 s = phys_avail[i]; 1422 e = s + size; 1423 1424 if (s < phys_avail[i] || e > phys_avail[i + 1]) 1425 continue; 1426 1427 if (s == phys_avail[i]) { 1428 phys_avail[i] += size; 1429 } else if (e == phys_avail[i + 1]) { 1430 phys_avail[i + 1] -= size; 1431 } else { 1432 for (j = phys_avail_count * 2; j > i; j -= 2) { 1433 phys_avail[j] = phys_avail[j - 2]; 1434 phys_avail[j + 1] = phys_avail[j - 1]; 1435 } 1436 1437 phys_avail[i + 3] = phys_avail[i + 1]; 1438 phys_avail[i + 1] = s; 1439 phys_avail[i + 2] = e; 1440 phys_avail_count++; 1441 } 1442 1443 return (s); |
1444 }
|
1445 panic("pmap_bootstrap_alloc: could not allocate memory"); |
1446} 1447 1448/*
|
1722 * Bring the kernel stack for a specified thread back in.
|
1449 * Return an unmapped pvo for a kernel virtual address. 1450 * Used by pmap functions that operate on physical pages. |
1451 */
|
1724void
1725pmap_swapin_thread(td)
1726 struct thread *td;
|
1452static struct pvo_entry * 1453pmap_rkva_alloc(void) |
1454{
|
1728 int i, rv;
1729 vm_object_t ksobj;
1730 vm_offset_t ks;
1731 vm_page_t m;
|
1455 struct pvo_entry *pvo; 1456 struct pte *pt; 1457 vm_offset_t kva; 1458 int pteidx; |
1459
|
1733 ksobj = td->td_kstack_obj;
1734 ks = td->td_kstack;
1735 for (i = 0; i < KSTACK_PAGES; i++) {
1736 m = vm_page_grab(ksobj, i, VM_ALLOC_NORMAL | VM_ALLOC_RETRY);
1737 pmap_kenter(ks + i * PAGE_SIZE, VM_PAGE_TO_PHYS(m));
1738 if (m->valid != VM_PAGE_BITS_ALL) {
1739 rv = vm_pager_get_pages(ksobj, &m, 1, 0);
1740 if (rv != VM_PAGER_OK)
1741 panic("pmap_swapin_thread: cannot get kstack for proc: %d\n", td->td_proc->p_pid);
1742 m = vm_page_lookup(ksobj, i);
1743 m->valid = VM_PAGE_BITS_ALL;
|
1460 if (pmap_rkva_count == 0) 1461 panic("pmap_rkva_alloc: no more reserved KVAs"); 1462 1463 kva = pmap_rkva_start + (PAGE_SIZE * --pmap_rkva_count); 1464 pmap_kenter(kva, 0); 1465 1466 pvo = pmap_pvo_find_va(kernel_pmap, kva, &pteidx); 1467 1468 if (pvo == NULL) 1469 panic("pmap_kva_alloc: pmap_pvo_find_va failed"); 1470 1471 pt = pmap_pvo_to_pte(pvo, pteidx); 1472 1473 if (pt == NULL) 1474 panic("pmap_kva_alloc: pmap_pvo_to_pte failed"); 1475 1476 pmap_pte_unset(pt, &pvo->pvo_pte, pvo->pvo_vaddr); 1477 PVO_PTEGIDX_CLR(pvo); 1478 1479 pmap_pte_overflow++; 1480 1481 return (pvo); 1482} 1483 1484static void 1485pmap_pa_map(struct pvo_entry *pvo, vm_offset_t pa, struct pte *saved_pt, 1486 int *depth_p) 1487{ 1488 struct pte *pt; 1489 1490 critical_enter(); 1491 1492 /* 1493 * If this pvo already has a valid pte, we need to save it so it can 1494 * be restored later. We then just reload the new PTE over the old 1495 * slot. 1496 */ 1497 if (saved_pt != NULL) { 1498 pt = pmap_pvo_to_pte(pvo, -1); 1499 1500 if (pt != NULL) { 1501 pmap_pte_unset(pt, &pvo->pvo_pte, pvo->pvo_vaddr); 1502 PVO_PTEGIDX_CLR(pvo); 1503 pmap_pte_overflow++; |
1504 }
|
1745 vm_page_wire(m);
1746 vm_page_wakeup(m);
1747 vm_page_flag_set(m, PG_MAPPED | PG_WRITEABLE);
|
1505 1506 *saved_pt = pvo->pvo_pte; 1507 1508 pvo->pvo_pte.pte_lo &= ~PTE_RPGN; |
1509 }
|
1510 1511 pvo->pvo_pte.pte_lo |= pa; 1512 1513 if (!pmap_pte_spill(pvo->pvo_vaddr)) 1514 panic("pmap_pa_map: could not spill pvo %p", pvo); 1515 1516 if (depth_p != NULL) 1517 (*depth_p)++; 1518 1519 critical_exit(); |
1520} 1521
|
1751void
1752pmap_pageable(pmap_t pmap, vm_offset_t sva, vm_offset_t eva, boolean_t pageable)
|
1522static void 1523pmap_pa_unmap(struct pvo_entry *pvo, struct pte *saved_pt, int *depth_p) |
1524{
|
1525 struct pte *pt; |
1526
|
1755 return;
|
1527 critical_enter(); 1528 1529 pt = pmap_pvo_to_pte(pvo, -1); 1530 1531 if (pt != NULL) { 1532 pmap_pte_unset(pt, &pvo->pvo_pte, pvo->pvo_vaddr); 1533 PVO_PTEGIDX_CLR(pvo); 1534 pmap_pte_overflow++; 1535 } 1536 1537 pvo->pvo_pte.pte_lo &= ~PTE_RPGN; 1538 1539 /* 1540 * If there is a saved PTE and it's valid, restore it and return. 1541 */ 1542 if (saved_pt != NULL && (saved_pt->pte_lo & PTE_RPGN) != 0) { 1543 if (depth_p != NULL && --(*depth_p) == 0) 1544 panic("pmap_pa_unmap: restoring but depth == 0"); 1545 1546 pvo->pvo_pte = *saved_pt; 1547 1548 if (!pmap_pte_spill(pvo->pvo_vaddr)) 1549 panic("pmap_pa_unmap: could not spill pvo %p", pvo); 1550 } 1551 1552 critical_exit(); |
1553} 1554
|
1758void
1759pmap_change_wiring(pmap_t pmap, vm_offset_t va, boolean_t wired)
|
1555static void 1556pmap_syncicache(vm_offset_t pa, vm_size_t len) |
1557{
|
1558 __syncicache((void *)pa, len); 1559} |
1560
|
1762 /* XXX: coming soon... */
1763 return;
|
1561static void 1562tlbia(void) 1563{ 1564 caddr_t i; 1565 1566 SYNC(); 1567 for (i = 0; i < (caddr_t)0x00040000; i += 0x00001000) { 1568 TLBIE(i); 1569 EIEIO(); 1570 } 1571 TLBSYNC(); 1572 SYNC(); |
1573} 1574
|
1766void
1767pmap_prefault(pmap_t pmap, vm_offset_t addra, vm_map_entry_t entry)
|
1575static int 1576pmap_pvo_enter(pmap_t pm, vm_zone_t zone, struct pvo_head *pvo_head, 1577 vm_offset_t va, vm_offset_t pa, u_int pte_lo, int flags) |
1578{
|
1579 struct pvo_entry *pvo; 1580 u_int sr; 1581 int first; 1582 u_int ptegidx; 1583 int i; |
1584
|
1770 /* XXX: coming soon... */
1771 return;
|
1585 pmap_pvo_enter_calls++; 1586 1587 /* 1588 * Compute the PTE Group index. 1589 */ 1590 va &= ~ADDR_POFF; 1591 sr = va_to_sr(pm->pm_sr, va); 1592 ptegidx = va_to_pteg(sr, va); 1593 1594 critical_enter(); 1595 1596 /* 1597 * Remove any existing mapping for this page. Reuse the pvo entry if 1598 * there is a mapping. 1599 */ 1600 LIST_FOREACH(pvo, &pmap_pvo_table[ptegidx], pvo_olink) { 1601 if (pvo->pvo_pmap == pm && PVO_VADDR(pvo) == va) { 1602 pmap_pvo_remove(pvo, -1); 1603 break; 1604 } 1605 } 1606 1607 /* 1608 * If we aren't overwriting a mapping, try to allocate. 1609 */ 1610 critical_exit(); 1611 1612 pvo = zalloc(zone); 1613 1614 critical_enter(); 1615 1616 if (pvo == NULL) { 1617 critical_exit(); 1618 return (ENOMEM); 1619 } 1620 1621 pmap_pvo_entries++; 1622 pvo->pvo_vaddr = va; 1623 pvo->pvo_pmap = pm; 1624 LIST_INSERT_HEAD(&pmap_pvo_table[ptegidx], pvo, pvo_olink); 1625 pvo->pvo_vaddr &= ~ADDR_POFF; 1626 if (flags & VM_PROT_EXECUTE) 1627 pvo->pvo_vaddr |= PVO_EXECUTABLE; 1628 if (flags & PVO_WIRED) 1629 pvo->pvo_vaddr |= PVO_WIRED; 1630 if (pvo_head != &pmap_pvo_kunmanaged) 1631 pvo->pvo_vaddr |= PVO_MANAGED; 1632 pmap_pte_create(&pvo->pvo_pte, sr, va, pa | pte_lo); 1633 1634 /* 1635 * Remember if the list was empty and therefore will be the first 1636 * item. 1637 */ 1638 first = LIST_FIRST(pvo_head) == NULL; 1639 1640 LIST_INSERT_HEAD(pvo_head, pvo, pvo_vlink); 1641 if (pvo->pvo_pte.pte_lo & PVO_WIRED) 1642 pvo->pvo_pmap->pm_stats.wired_count++; 1643 pvo->pvo_pmap->pm_stats.resident_count++; 1644 1645 /* 1646 * We hope this succeeds but it isn't required. 1647 */ 1648 i = pmap_pte_insert(ptegidx, &pvo->pvo_pte); 1649 if (i >= 0) { 1650 PVO_PTEGIDX_SET(pvo, i); 1651 } else { 1652 panic("pmap_pvo_enter: overflow"); 1653 pmap_pte_overflow++; 1654 } 1655 1656 critical_exit(); 1657 1658 return (first ? ENOENT : 0); |
1659} 1660
|
1774void
1775pmap_remove_pages(pmap_t pmap, vm_offset_t sva, vm_offset_t eva)
|
1661static void 1662pmap_pvo_remove(struct pvo_entry *pvo, int pteidx) |
1663{
|
1664 struct pte *pt; |
1665
|
1778 /* XXX: coming soon... */
1779 return;
|
1666 /* 1667 * If there is an active pte entry, we need to deactivate it (and 1668 * save the ref & cfg bits). 1669 */ 1670 pt = pmap_pvo_to_pte(pvo, pteidx); 1671 if (pt != NULL) { 1672 pmap_pte_unset(pt, &pvo->pvo_pte, pvo->pvo_vaddr); 1673 PVO_PTEGIDX_CLR(pvo); 1674 } else { 1675 pmap_pte_overflow--; 1676 } 1677 1678 /* 1679 * Update our statistics. 1680 */ 1681 pvo->pvo_pmap->pm_stats.resident_count--; 1682 if (pvo->pvo_pte.pte_lo & PVO_WIRED) 1683 pvo->pvo_pmap->pm_stats.wired_count--; 1684 1685 /* 1686 * Save the REF/CHG bits into their cache if the page is managed. 1687 */ 1688 if (pvo->pvo_vaddr & PVO_MANAGED) { 1689 struct vm_page *pg; 1690 1691 pg = PHYS_TO_VM_PAGE(pvo->pvo_pte.pte_lo * PTE_RPGN); 1692 if (pg != NULL) { 1693 pmap_attr_save(pg, pvo->pvo_pte.pte_lo & 1694 (PTE_REF | PTE_CHG)); 1695 } 1696 } 1697 1698 /* 1699 * Remove this PVO from the PV list. 1700 */ 1701 LIST_REMOVE(pvo, pvo_vlink); 1702 1703 /* 1704 * Remove this from the overflow list and return it to the pool 1705 * if we aren't going to reuse it. 1706 */ 1707 LIST_REMOVE(pvo, pvo_olink); 1708 zfree(pvo->pvo_vaddr & PVO_MANAGED ? pmap_mpvo_zone : pmap_upvo_zone, 1709 pvo); 1710 pmap_pvo_entries--; 1711 pmap_pvo_remove_calls++; |
1712} 1713
|
1782void
1783pmap_pinit0(pmap_t pmap)
|
1714static __inline int 1715pmap_pvo_pte_index(const struct pvo_entry *pvo, int ptegidx) |
1716{
|
1717 int pteidx; |
1718
|
1786 /* XXX: coming soon... */
1787 return;
|
1719 /* 1720 * We can find the actual pte entry without searching by grabbing 1721 * the PTEG index from 3 unused bits in pte_lo[11:9] and by 1722 * noticing the HID bit. 1723 */ 1724 pteidx = ptegidx * 8 + PVO_PTEGIDX_GET(pvo); 1725 if (pvo->pvo_pte.pte_hi & PTE_HID) 1726 pteidx ^= pmap_pteg_mask * 8; 1727 1728 return (pteidx); |
1729} 1730
|
1790void
1791pmap_dispose_proc(struct proc *p)
|
1731static struct pvo_entry * 1732pmap_pvo_find_va(pmap_t pm, vm_offset_t va, int *pteidx_p) |
1733{
|
1734 struct pvo_entry *pvo; 1735 int ptegidx; 1736 u_int sr; |
1737
|
1794 /* XXX: coming soon... */
1795 return;
|
1738 va &= ~ADDR_POFF; 1739 sr = va_to_sr(pm->pm_sr, va); 1740 ptegidx = va_to_pteg(sr, va); 1741 1742 LIST_FOREACH(pvo, &pmap_pvo_table[ptegidx], pvo_olink) { 1743 if (pvo->pvo_pmap == pm && PVO_VADDR(pvo) == va) { 1744 if (pteidx_p) 1745 *pteidx_p = pmap_pvo_pte_index(pvo, ptegidx); 1746 return (pvo); 1747 } 1748 } 1749 1750 return (NULL); |
1751} 1752
|
1798vm_offset_t
1799pmap_steal_memory(vm_size_t size)
|
1753static struct pte * 1754pmap_pvo_to_pte(const struct pvo_entry *pvo, int pteidx) |
1755{
|
1801 vm_size_t bank_size;
1802 vm_offset_t pa;
|
1756 struct pte *pt; |
1757
|
1804 size = round_page(size);
|
1758 /* 1759 * If we haven't been supplied the ptegidx, calculate it. 1760 */ 1761 if (pteidx == -1) { 1762 int ptegidx; 1763 u_int sr; |
1764
|
1806 bank_size = phys_avail[1] - phys_avail[0];
1807 while (size > bank_size) {
1808 int i;
1809 for (i = 0; phys_avail[i+2]; i+= 2) {
1810 phys_avail[i] = phys_avail[i+2];
1811 phys_avail[i+1] = phys_avail[i+3];
|
1765 sr = va_to_sr(pvo->pvo_pmap->pm_sr, pvo->pvo_vaddr); 1766 ptegidx = va_to_pteg(sr, pvo->pvo_vaddr); 1767 pteidx = pmap_pvo_pte_index(pvo, ptegidx); 1768 } 1769 1770 pt = &pmap_pteg_table[pteidx >> 3].pt[pteidx & 7]; 1771 1772 if ((pvo->pvo_pte.pte_hi & PTE_VALID) && !PVO_PTEGIDX_ISSET(pvo)) { 1773 panic("pmap_pvo_to_pte: pvo %p has valid pte in pvo but no " 1774 "valid pte index", pvo); 1775 } 1776 1777 if ((pvo->pvo_pte.pte_hi & PTE_VALID) == 0 && PVO_PTEGIDX_ISSET(pvo)) { 1778 panic("pmap_pvo_to_pte: pvo %p has valid pte index in pvo " 1779 "pvo but no valid pte", pvo); 1780 } 1781 1782 if ((pt->pte_hi ^ (pvo->pvo_pte.pte_hi & ~PTE_VALID)) == PTE_VALID) { 1783 if ((pvo->pvo_pte.pte_hi & PTE_VALID) == 0) { 1784 panic("pmap_pvo_to_pte: pvo %p has valid pte in " 1785 "pmap_pteg_table %p but invalid in pvo", pvo, pt); |
1786 }
|
1813 phys_avail[i] = 0;
1814 phys_avail[i+1] = 0;
1815 if (!phys_avail[0])
1816 panic("pmap_steal_memory: out of memory");
1817 bank_size = phys_avail[1] - phys_avail[0];
|
1787 1788 if (((pt->pte_lo ^ pvo->pvo_pte.pte_lo) & ~(PTE_CHG|PTE_REF)) 1789 != 0) { 1790 panic("pmap_pvo_to_pte: pvo %p pte does not match " 1791 "pte %p in pmap_pteg_table", pvo, pt); 1792 } 1793 1794 return (pt); |
1795 } 1796
|
1820 pa = phys_avail[0];
1821 phys_avail[0] += size;
|
1797 if (pvo->pvo_pte.pte_hi & PTE_VALID) { 1798 panic("pmap_pvo_to_pte: pvo %p has invalid pte %p in " 1799 "pmap_pteg_table but valid in pvo", pvo, pt); 1800 } |
1801
|
1823 bzero((caddr_t) pa, size);
1824 return pa;
|
1802 return (NULL); |
1803} 1804 1805/*
|
1828 * Create the UAREA_PAGES for a new process.
1829 * This routine directly affects the fork perf for a process.
|
1806 * XXX: THIS STUFF SHOULD BE IN pte.c? |
1807 */
|
1831void
1832pmap_new_proc(struct proc *p)
|
1808int 1809pmap_pte_spill(vm_offset_t addr) |
1810{
|
1834 int i;
1835 vm_object_t upobj;
1836 vm_offset_t up;
1837 vm_page_t m;
1838 pte_t pte;
1839 sr_t sr;
1840 int idx;
1841 vm_offset_t va;
|
1811 struct pvo_entry *source_pvo, *victim_pvo; 1812 struct pvo_entry *pvo; 1813 int ptegidx, i, j; 1814 u_int sr; 1815 struct pteg *pteg; 1816 struct pte *pt; |
1817
|
1818 pmap_pte_spills++; 1819 1820 __asm __volatile("mfsrin %0,%1" : "=r"(sr) : "r"(addr)); 1821 ptegidx = va_to_pteg(sr, addr); 1822 |
1823 /*
|
1844 * allocate object for the upages
|
1824 * Have to substitute some entry. Use the primary hash for this. 1825 * Use low bits of timebase as random generator. |
1826 */
|
1846 upobj = p->p_upages_obj;
1847 if (upobj == NULL) {
1848 upobj = vm_object_allocate(OBJT_DEFAULT, UAREA_PAGES);
1849 p->p_upages_obj = upobj;
1850 }
|
1827 pteg = &pmap_pteg_table[ptegidx]; 1828 __asm __volatile("mftb %0" : "=r"(i)); 1829 i &= 7; 1830 pt = &pteg->pt[i]; |
1831
|
1852 /* get a kernel virtual address for the UAREA_PAGES for this proc */
1853 up = (vm_offset_t)p->p_uarea;
1854 if (up == 0) {
1855 up = kmem_alloc_nofault(kernel_map, UAREA_PAGES * PAGE_SIZE);
1856 if (up == 0)
1857 panic("pmap_new_proc: upage allocation failed");
1858 p->p_uarea = (struct user *)up;
1859 }
1860
1861 for (i = 0; i < UAREA_PAGES; i++) {
|
1832 source_pvo = NULL; 1833 victim_pvo = NULL; 1834 LIST_FOREACH(pvo, &pmap_pvo_table[ptegidx], pvo_olink) { |
1835 /*
|
1863 * Get a kernel stack page
|
1836 * We need to find a pvo entry for this address. |
1837 */
|
1865 m = vm_page_grab(upobj, i, VM_ALLOC_NORMAL | VM_ALLOC_RETRY);
|
1838 PMAP_PVO_CHECK(pvo); 1839 if (source_pvo == NULL && 1840 pmap_pte_match(&pvo->pvo_pte, sr, addr, 1841 pvo->pvo_pte.pte_hi & PTE_HID)) { 1842 /* 1843 * Now found an entry to be spilled into the pteg. 1844 * The PTE is now valid, so we know it's active. 1845 */ 1846 j = pmap_pte_insert(ptegidx, &pvo->pvo_pte); |
1847
|
1848 if (j >= 0) { 1849 PVO_PTEGIDX_SET(pvo, j); 1850 pmap_pte_overflow--; 1851 PMAP_PVO_CHECK(pvo); 1852 return (1); 1853 } 1854 1855 source_pvo = pvo; 1856 1857 if (victim_pvo != NULL) 1858 break; 1859 } 1860 |
1861 /*
|
1868 * Wire the page
|
1862 * We also need the pvo entry of the victim we are replacing 1863 * so save the R & C bits of the PTE. |
1864 */
|
1870 m->wire_count++;
1871 cnt.v_wire_count++;
|
1865 if ((pt->pte_hi & PTE_HID) == 0 && victim_pvo == NULL && 1866 pmap_pte_compare(pt, &pvo->pvo_pte)) { 1867 victim_pvo = pvo; 1868 if (source_pvo != NULL) 1869 break; 1870 } 1871 } |
1872
|
1873 if (source_pvo == NULL) 1874 return (0); 1875 1876 if (victim_pvo == NULL) { 1877 if ((pt->pte_hi & PTE_HID) == 0) 1878 panic("pmap_pte_spill: victim p-pte (%p) has no pvo" 1879 "entry", pt); 1880 |
1881 /*
|
1874 * Enter the page into the kernel address space.
|
1882 * If this is a secondary PTE, we need to search it's primary 1883 * pvo bucket for the matching PVO. |
1884 */
|
1876 va = up + i * PAGE_SIZE;
1877 idx = pteidx(sr = ptesr(kernel_pmap->pm_sr, va), va);
|
1885 LIST_FOREACH(pvo, &pmap_pvo_table[ptegidx ^ pmap_pteg_mask], 1886 pvo_olink) { 1887 PMAP_PVO_CHECK(pvo); 1888 /* 1889 * We also need the pvo entry of the victim we are 1890 * replacing so save the R & C bits of the PTE. 1891 */ 1892 if (pmap_pte_compare(pt, &pvo->pvo_pte)) { 1893 victim_pvo = pvo; 1894 break; 1895 } 1896 } |
1897
|
1879 pte.pte_hi = ((sr & SR_VSID) << PTE_VSID_SHFT) |
1880 ((va & ADDR_PIDX) >> ADDR_API_SHFT);
1881 pte.pte_lo = (VM_PAGE_TO_PHYS(m) & PTE_RPGN) | PTE_M | PTE_I |
1882 PTE_G | PTE_RW;
|
1898 if (victim_pvo == NULL) 1899 panic("pmap_pte_spill: victim s-pte (%p) has no pvo" 1900 "entry", pt); 1901 } |
1902
|
1884 if (!pte_insert(idx, &pte)) {
1885 struct pte_ovfl *po;
|
1903 /* 1904 * We are invalidating the TLB entry for the EA we are replacing even 1905 * though it's valid. If we don't, we lose any ref/chg bit changes 1906 * contained in the TLB entry. 1907 */ 1908 source_pvo->pvo_pte.pte_hi &= ~PTE_HID; |
1909
|
1887 po = poalloc();
1888 po->po_pte = pte;
1889 LIST_INSERT_HEAD(potable + idx, po, po_list);
1890 }
|
1910 pmap_pte_unset(pt, &victim_pvo->pvo_pte, victim_pvo->pvo_vaddr); 1911 pmap_pte_set(pt, &source_pvo->pvo_pte); |
1912
|
1892 tlbie(va);
|
1913 PVO_PTEGIDX_CLR(victim_pvo); 1914 PVO_PTEGIDX_SET(source_pvo, i); 1915 pmap_pte_replacements++; |
1916
|
1894 vm_page_wakeup(m);
1895 vm_page_flag_clear(m, PG_ZERO);
1896 vm_page_flag_set(m, PG_MAPPED | PG_WRITEABLE);
1897 m->valid = VM_PAGE_BITS_ALL;
|
1917 PMAP_PVO_CHECK(victim_pvo); 1918 PMAP_PVO_CHECK(source_pvo); 1919 1920 return (1); 1921} 1922 1923static int 1924pmap_pte_insert(u_int ptegidx, struct pte *pvo_pt) 1925{ 1926 struct pte *pt; 1927 int i; 1928 1929 /* 1930 * First try primary hash. 1931 */ 1932 for (pt = pmap_pteg_table[ptegidx].pt, i = 0; i < 8; i++, pt++) { 1933 if ((pt->pte_hi & PTE_VALID) == 0) { 1934 pvo_pt->pte_hi &= ~PTE_HID; 1935 pmap_pte_set(pt, pvo_pt); 1936 return (i); 1937 } |
1938 }
|
1939 1940 /* 1941 * Now try secondary hash. 1942 */ 1943 ptegidx ^= pmap_pteg_mask; 1944 ptegidx++; 1945 for (pt = pmap_pteg_table[ptegidx].pt, i = 0; i < 8; i++, pt++) { 1946 if ((pt->pte_hi & PTE_VALID) == 0) { 1947 pvo_pt->pte_hi |= PTE_HID; 1948 pmap_pte_set(pt, pvo_pt); 1949 return (i); 1950 } 1951 } 1952 1953 panic("pmap_pte_insert: overflow"); 1954 return (-1); |
1955} 1956
|
1901void *
1902pmap_mapdev(vm_offset_t pa, vm_size_t len)
|
1957static boolean_t 1958pmap_query_bit(vm_page_t m, int ptebit) |
1959{
|
1904 vm_offset_t faddr;
1905 vm_offset_t taddr, va;
1906 int off;
|
1960 struct pvo_entry *pvo; 1961 struct pte *pt; |
1962
|
1908 faddr = trunc_page(pa);
1909 off = pa - faddr;
1910 len = round_page(off + len);
|
1963 if (pmap_attr_fetch(m) & ptebit) 1964 return (TRUE); |
1965
|
1912 GIANT_REQUIRED;
|
1966 critical_enter(); |
1967
|
1914 va = taddr = kmem_alloc_pageable(kernel_map, len);
|
1968 LIST_FOREACH(pvo, vm_page_to_pvoh(m), pvo_vlink) { 1969 PMAP_PVO_CHECK(pvo); /* sanity check */ |
1970
|
1916 if (va == 0)
1917 return NULL;
|
1971 /* 1972 * See if we saved the bit off. If so, cache it and return 1973 * success. 1974 */ 1975 if (pvo->pvo_pte.pte_lo & ptebit) { 1976 pmap_attr_save(m, ptebit); 1977 PMAP_PVO_CHECK(pvo); /* sanity check */ 1978 critical_exit(); 1979 return (TRUE); 1980 } 1981 } |
1982
|
1919 for (; len > 0; len -= PAGE_SIZE) {
1920 pmap_kenter(taddr, faddr);
1921 faddr += PAGE_SIZE;
1922 taddr += PAGE_SIZE;
|
1983 /* 1984 * No luck, now go through the hard part of looking at the PTEs 1985 * themselves. Sync so that any pending REF/CHG bits are flushed to 1986 * the PTEs. 1987 */ 1988 SYNC(); 1989 LIST_FOREACH(pvo, vm_page_to_pvoh(m), pvo_vlink) { 1990 PMAP_PVO_CHECK(pvo); /* sanity check */ 1991 1992 /* 1993 * See if this pvo has a valid PTE. if so, fetch the 1994 * REF/CHG bits from the valid PTE. If the appropriate 1995 * ptebit is set, cache it and return success. 1996 */ 1997 pt = pmap_pvo_to_pte(pvo, -1); 1998 if (pt != NULL) { 1999 pmap_pte_synch(pt, &pvo->pvo_pte); 2000 if (pvo->pvo_pte.pte_lo & ptebit) { 2001 pmap_attr_save(m, ptebit); 2002 PMAP_PVO_CHECK(pvo); /* sanity check */ 2003 critical_exit(); 2004 return (TRUE); 2005 } 2006 } |
2007 } 2008
|
1925 return (void *)(va + off);
|
2009 critical_exit(); 2010 return (TRUE); |
2011}
|
2012 2013static boolean_t 2014pmap_clear_bit(vm_page_t m, int ptebit) 2015{ 2016 struct pvo_entry *pvo; 2017 struct pte *pt; 2018 int rv; 2019 2020 critical_enter(); 2021 2022 /* 2023 * Clear the cached value. 2024 */ 2025 rv = pmap_attr_fetch(m); 2026 pmap_attr_clear(m, ptebit); 2027 2028 /* 2029 * Sync so that any pending REF/CHG bits are flushed to the PTEs (so 2030 * we can reset the right ones). note that since the pvo entries and 2031 * list heads are accessed via BAT0 and are never placed in the page 2032 * table, we don't have to worry about further accesses setting the 2033 * REF/CHG bits. 2034 */ 2035 SYNC(); 2036 2037 /* 2038 * For each pvo entry, clear the pvo's ptebit. If this pvo has a 2039 * valid pte clear the ptebit from the valid pte. 2040 */ 2041 LIST_FOREACH(pvo, vm_page_to_pvoh(m), pvo_vlink) { 2042 PMAP_PVO_CHECK(pvo); /* sanity check */ 2043 pt = pmap_pvo_to_pte(pvo, -1); 2044 if (pt != NULL) { 2045 pmap_pte_synch(pt, &pvo->pvo_pte); 2046 if (pvo->pvo_pte.pte_lo & ptebit) 2047 pmap_pte_clear(pt, PVO_VADDR(pvo), ptebit); 2048 } 2049 rv |= pvo->pvo_pte.pte_lo; 2050 pvo->pvo_pte.pte_lo &= ~ptebit; 2051 PMAP_PVO_CHECK(pvo); /* sanity check */ 2052 } 2053 2054 critical_exit(); 2055 return ((rv & ptebit) != 0); 2056} |
|