/*
 * Copyright (c) 1991 Regents of the University of California.
 * All rights reserved.
 * Copyright (c) 1994 John S. Dyson
 * All rights reserved.
 * Copyright (c) 1994 David Greenman
 * All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * the Systems Programming Group of the University of Utah Computer
 * Science Department and William Jolitz of UUNET Technologies Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	from:	@(#)pmap.c	7.7 (Berkeley)	5/12/91
 *	$Id: pmap.c,v 1.54 1995/03/26 23:42:07 davidg Exp $
 */

/*
 * Derived from hp300 version by Mike Hibler, this version by William
 * Jolitz uses a recursive map [a pde points to the page directory] to
 * map the page tables using the pagetables themselves.  This is done to
 * reduce the impact on kernel virtual memory for lots of sparse address
 * space, and to reduce the cost of memory to each process.
 *
 *	Derived from: hp300/@(#)pmap.c	7.1 (Berkeley) 12/5/90
 */
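/*
 * Illustration of the recursive map: because PTD[PTDPTDI] points back
 * at the page directory itself, the page tables of the current address
 * space appear as one linear array of ptes at the virtual address
 * PTmap, so the pte for any va can be reached without walking the
 * directory by hand:
 *
 *	pt_entry_t *pte = PTmap + i386_btop(va);	(this is vtopte(va))
 *
 * and the page directory itself appears inside that window as PTD.
 */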
/*
 * Major modifications by John S. Dyson primarily to support
 * pageable page tables, eliminating pmap_attributes,
 * discontiguous memory pages, and using more efficient string
 * instructions.  Jan 13, 1994.  Further modifications on Mar 2, 1994,
 * general clean-up and efficiency mods.
 */

/*
 * Manages physical address maps.
 *
 * In addition to hardware address maps, this
 * module is called upon to provide software-use-only
 * maps which may or may not be stored in the same
 * form as hardware maps.  These pseudo-maps are
 * used to store intermediate results from copy
 * operations to and from address spaces.
 *
 * Since the information managed by this module is
 * also stored by the logical address mapping module,
 * this module may throw away valid virtual-to-physical
 * mappings at almost any time.  However, invalidations
 * of virtual-to-physical mappings must be done as
 * requested.
 *
 * In order to cope with hardware architectures which
 * make virtual-to-physical map invalidates expensive,
 * this module may delay invalidate or reduced protection
 * operations until such time as they are actually
 * necessary.  This module is given full information as
 * to which processors are currently using which maps,
 * and to when physical maps must be made correct.
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/proc.h>
#include <sys/malloc.h>
#include <sys/user.h>

#include <vm/vm.h>
#include <vm/vm_kern.h>
#include <vm/vm_page.h>

#include <machine/cputypes.h>
#include <machine/md_var.h>

#include <i386/isa/isa.h>

/*
 * Allocate various and sundry SYSMAPs used in the days of old VM
 * and not yet converted.  XXX.
 */
#define BSDVM_COMPAT	1

/*
 * Get PDEs and PTEs for user/kernel address space
 */
#define pmap_pde(m, v)	(&((m)->pm_pdir[((vm_offset_t)(v) >> PD_SHIFT)&1023]))
#define pdir_pde(m, v)	(m[((vm_offset_t)(v) >> PD_SHIFT)&1023])

#define pmap_pte_pa(pte)	(*(int *)(pte) & PG_FRAME)

#define pmap_pde_v(pte)		((*(int *)pte & PG_V) != 0)
#define pmap_pte_w(pte)		((*(int *)pte & PG_W) != 0)
#define pmap_pte_m(pte)		((*(int *)pte & PG_M) != 0)
#define pmap_pte_u(pte)		((*(int *)pte & PG_U) != 0)
#define pmap_pte_v(pte)		((*(int *)pte & PG_V) != 0)

#define pmap_pte_set_w(pte, v)		((v)?(*(int *)pte |= PG_W):(*(int *)pte &= ~PG_W))
#define pmap_pte_set_prot(pte, v)	((*(int *)pte &= ~PG_PROT), (*(int *)pte |= (v)))
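/*
 * Worked example (illustrative only): with 4K pages, PD_SHIFT is 22,
 * so for va = 0xfe001234 the pde index is 0xfe001234 >> 22 = 0x3f8,
 * the pte index within that page table is (0xfe001234 >> 12) & 1023 = 1,
 * and the remaining 0x234 is the byte offset within the page.
 */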
/*
 * Given a map and a machine independent protection code,
 * convert to a vax protection code.
 */
#define pte_prot(m, p)	(protection_codes[p])
int protection_codes[8];

struct pmap kernel_pmap_store;
pmap_t kernel_pmap;

vm_offset_t phys_avail[6];	/* 2 entries + 1 null */
vm_offset_t avail_start;	/* PA of first available physical page */
vm_offset_t avail_end;		/* PA of last available physical page */
vm_size_t mem_size;		/* memory size in bytes */
vm_offset_t virtual_avail;	/* VA of first avail page (after kernel bss) */
vm_offset_t virtual_end;	/* VA of last avail page (end of kernel AS) */
int i386pagesperpage;		/* PAGE_SIZE / I386_PAGE_SIZE */
boolean_t pmap_initialized = FALSE;	/* Has pmap_init completed? */
vm_offset_t vm_first_phys, vm_last_phys;

static inline int pmap_is_managed();
static void i386_protection_init();
static void pmap_alloc_pv_entry();
static inline pv_entry_t get_pv_entry();
int nkpt;

extern vm_offset_t clean_sva, clean_eva;
extern int cpu_class;

#if BSDVM_COMPAT
#include <sys/msgbuf.h>

/*
 * All those kernel PT submaps that BSD is so fond of
 */
pt_entry_t *CMAP1, *CMAP2, *ptmmap;
pv_entry_t pv_table;
caddr_t CADDR1, CADDR2, ptvmmap;
pt_entry_t *msgbufmap;
struct msgbuf *msgbufp;

#endif

void
init_pv_entries(int);

/*
 * Routine:	pmap_pte
 * Function:
 *	Extract the page table entry associated
 *	with the given map/virtual_address pair.
 * [ what about induced faults -wfj]
 */

inline pt_entry_t * const
pmap_pte(pmap, va)
	register pmap_t pmap;
	vm_offset_t va;
{

	if (pmap && *pmap_pde(pmap, va)) {
		vm_offset_t frame = (int) pmap->pm_pdir[PTDPTDI] & PG_FRAME;

		/* are we current address space or kernel? */
		if ((pmap == kernel_pmap) || (frame == ((int) PTDpde & PG_FRAME)))
			return ((pt_entry_t *) vtopte(va));
		/* otherwise, we are alternate address space */
		else {
			if (frame != ((int) APTDpde & PG_FRAME)) {
				APTDpde = pmap->pm_pdir[PTDPTDI];
				pmap_update();
			}
			return ((pt_entry_t *) avtopte(va));
		}
	}
	return (0);
}
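/*
 * Note: for a pmap that is neither the kernel's nor the one currently
 * loaded, the directory is mapped through the alternate recursive slot
 * (APTDpde) and pmap_update() flushes the TLB so avtopte() sees the new
 * tables.  The pointer returned is therefore only good until some other
 * pmap is switched into that alternate window.
 */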
/*
 * Routine:	pmap_extract
 * Function:
 *	Extract the physical page address associated
 *	with the given map/virtual_address pair.
 */

vm_offset_t
pmap_extract(pmap, va)
	register pmap_t pmap;
	vm_offset_t va;
{
	vm_offset_t pa;

	if (pmap && *pmap_pde(pmap, va)) {
		vm_offset_t frame = (int) pmap->pm_pdir[PTDPTDI] & PG_FRAME;

		/* are we current address space or kernel? */
		if ((pmap == kernel_pmap)
		    || (frame == ((int) PTDpde & PG_FRAME))) {
			pa = *(int *) vtopte(va);
			/* otherwise, we are alternate address space */
		} else {
			if (frame != ((int) APTDpde & PG_FRAME)) {
				APTDpde = pmap->pm_pdir[PTDPTDI];
				pmap_update();
			}
			pa = *(int *) avtopte(va);
		}
		return ((pa & PG_FRAME) | (va & ~PG_FRAME));
	}
	return 0;

}

/*
 * determine if a page is managed (memory vs. device)
 */
static inline int
pmap_is_managed(pa)
	vm_offset_t pa;
{
	int i;

	if (!pmap_initialized)
		return 0;

	for (i = 0; phys_avail[i + 1]; i += 2) {
		if (pa >= phys_avail[i] && pa < phys_avail[i + 1])
			return 1;
	}
	return 0;
}
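/*
 * For illustration (values hypothetical): with
 *	phys_avail[] = { 0x1000, 0x9f000, 0x100000, 0x3ffe000, 0, 0 }
 * a pa of 0x200000 falls in the second range and is managed, while
 * device memory such as 0xf0000000 matches neither range and is not.
 */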
/*
 * find the vm_page_t of a pte (only) given va of pte and pmap
 */
__inline vm_page_t
pmap_pte_vm_page(pmap, pt)
	pmap_t pmap;
	vm_offset_t pt;
{
	vm_page_t m;

	pt = i386_trunc_page(pt);
	pt = (pt - UPT_MIN_ADDRESS) / NBPG;
	pt = ((vm_offset_t) pmap->pm_pdir[pt]) & PG_FRAME;
	m = PHYS_TO_VM_PAGE(pt);
	return m;
}

/*
 * Wire a page table page
 */
__inline void
pmap_use_pt(pmap, va)
	pmap_t pmap;
	vm_offset_t va;
{
	vm_offset_t pt;

	if ((va >= UPT_MIN_ADDRESS) || !pmap_initialized)
		return;

	pt = (vm_offset_t) vtopte(va);
	vm_page_hold(pmap_pte_vm_page(pmap, pt));
}

/*
 * Unwire a page table page
 */
inline void
pmap_unuse_pt(pmap, va)
	pmap_t pmap;
	vm_offset_t va;
{
	vm_offset_t pt;
	vm_page_t m;

	if ((va >= UPT_MIN_ADDRESS) || !pmap_initialized)
		return;

	pt = (vm_offset_t) vtopte(va);
	m = pmap_pte_vm_page(pmap, pt);
	vm_page_unhold(m);
	if (pmap != kernel_pmap &&
	    (m->hold_count == 0) &&
	    (m->wire_count == 0) &&
	    (va < KPT_MIN_ADDRESS)) {
		pmap_page_protect(VM_PAGE_TO_PHYS(m), VM_PROT_NONE);
		vm_page_free(m);
	}
}

/* [ macro again?, should I force kstack into user map here? -wfj ] */
void
pmap_activate(pmap, pcbp)
	register pmap_t pmap;
	struct pcb *pcbp;
{
	PMAP_ACTIVATE(pmap, pcbp);
}

/*
 * Bootstrap the system enough to run with virtual memory.
 * Map the kernel's code and data, and allocate the system page table.
 *
 * On the I386 this is called after mapping has already been enabled
 * and just syncs the pmap module with what has already been done.
 * [We can't call it easily with mapping off since the kernel is not
 * mapped with PA == VA, hence we would have to relocate every address
 * from the linked base (virtual) address "KERNBASE" to the actual
 * (physical) address starting relative to 0]
 */

#define DMAPAGES 8
void
pmap_bootstrap(firstaddr, loadaddr)
	vm_offset_t firstaddr;
	vm_offset_t loadaddr;
{
#if BSDVM_COMPAT
	vm_offset_t va;
	pt_entry_t *pte;

#endif

	avail_start = firstaddr + DMAPAGES * NBPG;

	virtual_avail = (vm_offset_t) KERNBASE + avail_start;
	virtual_end = VM_MAX_KERNEL_ADDRESS;
	i386pagesperpage = PAGE_SIZE / NBPG;

	/*
	 * Initialize protection array.
	 */
	i386_protection_init();

	/*
	 * The kernel's pmap is statically allocated so we don't have to use
	 * pmap_create, which is unlikely to work correctly at this part of
	 * the boot sequence.
	 */
	kernel_pmap = &kernel_pmap_store;

	kernel_pmap->pm_pdir = (pd_entry_t *) (KERNBASE + IdlePTD);

	simple_lock_init(&kernel_pmap->pm_lock);
	kernel_pmap->pm_count = 1;
	nkpt = NKPT;

#if BSDVM_COMPAT
	/*
	 * Allocate all the submaps we need
	 */
#define SYSMAP(c, p, v, n)	\
	v = (c)va; va += ((n)*NBPG); p = pte; pte += (n);

	va = virtual_avail;
	pte = pmap_pte(kernel_pmap, va);

	SYSMAP(caddr_t, CMAP1, CADDR1, 1)
	SYSMAP(caddr_t, CMAP2, CADDR2, 1)
	SYSMAP(caddr_t, ptmmap, ptvmmap, 1)
	SYSMAP(struct msgbuf *, msgbufmap, msgbufp, 1)
	virtual_avail = va;
#endif
	/*
	 * Reserve special hunk of memory for use by bus dma as a bounce
	 * buffer (contiguous virtual *and* physical memory).
	 */
	{
		isaphysmem = va;

		virtual_avail = pmap_map(va, firstaddr,
		    firstaddr + DMAPAGES * NBPG, VM_PROT_ALL);
	}

	*(int *) CMAP1 = *(int *) CMAP2 = *(int *) PTD = 0;
	pmap_update();

}
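/*
 * For example, SYSMAP(caddr_t, CMAP1, CADDR1, 1) above expands to
 *	CADDR1 = (caddr_t) va; va += NBPG; CMAP1 = pte; pte += 1;
 * reserving one page of kva and remembering the address of its pte so
 * a mapping can later be installed by simply writing *CMAP1.
 */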
/*
 * Initialize the pmap module.
 * Called by vm_init, to initialize any structures that the pmap
 * system needs to map virtual memory.
 * pmap_init has been enhanced to support, in a fairly consistent
 * way, discontiguous physical memory.
 */
void
pmap_init(phys_start, phys_end)
	vm_offset_t phys_start, phys_end;
{
	vm_offset_t addr;
	vm_size_t npg, s;
	int i;

	/*
	 * Now that kernel map has been allocated, we can mark as unavailable
	 * regions which we have mapped in locore.
	 */
	addr = atdevbase;
	(void) vm_map_find(kernel_map, NULL, (vm_offset_t) 0,
	    &addr, (0x100000 - 0xa0000), FALSE);

	addr = (vm_offset_t) KERNBASE + IdlePTD;
	vm_object_reference(kernel_object);
	(void) vm_map_find(kernel_map, kernel_object, addr,
	    &addr, (4 + NKPDE) * NBPG, FALSE);

	/*
	 * calculate the number of pv_entries needed
	 */
	vm_first_phys = phys_avail[0];
	for (i = 0; phys_avail[i + 1]; i += 2);
	npg = (phys_avail[(i - 2) + 1] - vm_first_phys) / NBPG;

	/*
	 * Allocate memory for random pmap data structures.  Includes the
	 * pv_head_table.
	 */
	s = (vm_size_t) (sizeof(struct pv_entry) * npg);
	s = i386_round_page(s);
	addr = (vm_offset_t) kmem_alloc(kernel_map, s);
	pv_table = (pv_entry_t) addr;

	/*
	 * init the pv free list
	 */
	init_pv_entries(npg);
	/*
	 * Now it is safe to enable pv_table recording.
	 */
	pmap_initialized = TRUE;
}

/*
 * Used to map a range of physical addresses into kernel
 * virtual address space.
 *
 * For now, VM is already on, we only need to map the
 * specified memory.
 */
vm_offset_t
pmap_map(virt, start, end, prot)
	vm_offset_t virt;
	vm_offset_t start;
	vm_offset_t end;
	int prot;
{
	while (start < end) {
		pmap_enter(kernel_pmap, virt, start, prot, FALSE);
		virt += PAGE_SIZE;
		start += PAGE_SIZE;
	}
	return (virt);
}
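/*
 * Typical use: pmap_bootstrap above maps the bounce-buffer pages with
 *	virtual_avail = pmap_map(va, firstaddr,
 *	    firstaddr + DMAPAGES * NBPG, VM_PROT_ALL);
 * advancing the virtual and physical cursors a page at a time and
 * returning the first unused kva.
 */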
/*
 * Create and return a physical map.
 *
 * If the size specified for the map
 * is zero, the map is an actual physical
 * map, and may be referenced by the
 * hardware.
 *
 * If the size specified is non-zero,
 * the map will be used in software only, and
 * is bounded by that size.
 *
 */

pmap_t
pmap_create(size)
	vm_size_t size;
{
	register pmap_t pmap;

	/*
	 * Software use map does not need a pmap
	 */
	if (size)
		return (NULL);

	pmap = (pmap_t) malloc(sizeof *pmap, M_VMPMAP, M_WAITOK);
	bzero(pmap, sizeof(*pmap));
	pmap_pinit(pmap);
	return (pmap);
}

/*
 * Initialize a preallocated and zeroed pmap structure,
 * such as one in a vmspace structure.
 */
void
pmap_pinit(pmap)
	register struct pmap *pmap;
{
	/*
	 * No need to allocate page table space yet but we do need a valid
	 * page directory table.
	 */
	pmap->pm_pdir = (pd_entry_t *) kmem_alloc(kernel_map, PAGE_SIZE);

	/* wire in kernel global address entries */
	bcopy(PTD + KPTDI, pmap->pm_pdir + KPTDI, nkpt * PTESIZE);

	/* install self-referential address mapping entry */
	*(int *) (pmap->pm_pdir + PTDPTDI) =
	    ((int) pmap_kextract((vm_offset_t) pmap->pm_pdir)) | PG_V | PG_KW;

	pmap->pm_count = 1;
	simple_lock_init(&pmap->pm_lock);
}
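/*
 * The entry installed at PTDPTDI is what makes the recursive PTmap/PTD
 * window (described at the top of this file) work for this pmap: once
 * its directory is loaded into %cr3, the directory maps itself as if
 * it were just another page table.
 */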
/*
 * grow the number of kernel page table entries, if needed
 */

vm_page_t nkpg;
vm_offset_t kernel_vm_end;

void
pmap_growkernel(vm_offset_t addr)
{
	struct proc *p;
	struct pmap *pmap;
	int s;

	s = splhigh();
	if (kernel_vm_end == 0) {
		kernel_vm_end = KERNBASE;
		nkpt = 0;
		while (pdir_pde(PTD, kernel_vm_end)) {
			kernel_vm_end = (kernel_vm_end + NBPG * NPTEPG) & ~(NBPG * NPTEPG - 1);
			++nkpt;
		}
	}
	addr = (addr + NBPG * NPTEPG) & ~(NBPG * NPTEPG - 1);
	while (kernel_vm_end < addr) {
		if (pdir_pde(PTD, kernel_vm_end)) {
			kernel_vm_end = (kernel_vm_end + NBPG * NPTEPG) & ~(NBPG * NPTEPG - 1);
			continue;
		}
		++nkpt;
		if (!nkpg) {
			nkpg = vm_page_alloc(kernel_object, 0, VM_ALLOC_SYSTEM);
			if (!nkpg)
				panic("pmap_growkernel: no memory to grow kernel");
			vm_page_wire(nkpg);
			vm_page_remove(nkpg);
			pmap_zero_page(VM_PAGE_TO_PHYS(nkpg));
		}
		pdir_pde(PTD, kernel_vm_end) = (pd_entry_t) (VM_PAGE_TO_PHYS(nkpg) | PG_V | PG_KW);
		nkpg = NULL;

		for (p = (struct proc *) allproc; p != NULL; p = p->p_next) {
			if (p->p_vmspace) {
				pmap = &p->p_vmspace->vm_pmap;
				*pmap_pde(pmap, kernel_vm_end) = pdir_pde(PTD, kernel_vm_end);
			}
		}
		*pmap_pde(kernel_pmap, kernel_vm_end) = pdir_pde(PTD, kernel_vm_end);
		kernel_vm_end = (kernel_vm_end + NBPG * NPTEPG) & ~(NBPG * NPTEPG - 1);
	}
	splx(s);
}

/*
 * Retire the given physical map from service.
 * Should only be called if the map contains
 * no valid mappings.
 */
void
pmap_destroy(pmap)
	register pmap_t pmap;
{
	int count;

	if (pmap == NULL)
		return;

	simple_lock(&pmap->pm_lock);
	count = --pmap->pm_count;
	simple_unlock(&pmap->pm_lock);
	if (count == 0) {
		pmap_release(pmap);
		free((caddr_t) pmap, M_VMPMAP);
	}
}

/*
 * Release any resources held by the given physical map.
 * Called when a pmap initialized by pmap_pinit is being released.
 * Should only be called if the map contains no valid mappings.
 */
void
pmap_release(pmap)
	register struct pmap *pmap;
{
	kmem_free(kernel_map, (vm_offset_t) pmap->pm_pdir, PAGE_SIZE);
}

/*
 * Add a reference to the specified pmap.
 */
void
pmap_reference(pmap)
	pmap_t pmap;
{
	if (pmap != NULL) {
		simple_lock(&pmap->pm_lock);
		pmap->pm_count++;
		simple_unlock(&pmap->pm_lock);
	}
}

#define PV_FREELIST_MIN ((NBPG / sizeof (struct pv_entry)) / 2)

/*
 * Data for the pv entry allocation mechanism
 */
int pv_freelistcnt;
pv_entry_t pv_freelist;
vm_offset_t pvva;
int npvvapg;

/*
 * free the pv_entry back to the free list
 */
inline static void
free_pv_entry(pv)
	pv_entry_t pv;
{
	if (!pv)
		return;
	++pv_freelistcnt;
	pv->pv_next = pv_freelist;
	pv_freelist = pv;
}
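/*
 * pv entries cycle between this freelist and the per-page chains
 * rooted in pv_table: pmap_enter and pmap_enter_quick take one via
 * get_pv_entry() when adding a mapping for a managed page, and
 * pmap_remove_entry/pmap_remove_all hand it back here.
 */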
/*
 * get a new pv_entry, allocating a block from the system
 * when needed.
 * the memory allocation is performed bypassing the malloc code
 * because of the possibility of allocations at interrupt time.
 */
static inline pv_entry_t
get_pv_entry()
{
	pv_entry_t tmp;

	/*
	 * get more pv_entry pages if needed
	 */
	if (pv_freelistcnt < PV_FREELIST_MIN || pv_freelist == 0) {
		pmap_alloc_pv_entry();
	}
	/*
	 * get a pv_entry off of the free list
	 */
	--pv_freelistcnt;
	tmp = pv_freelist;
	pv_freelist = tmp->pv_next;
	return tmp;
}

/*
 * this *strange* allocation routine *statistically* eliminates the
 * *possibility* of a malloc failure (*FATAL*) for a pv_entry_t data structure.
 * also -- this code is MUCH MUCH faster than the malloc equiv...
 */
static void
pmap_alloc_pv_entry()
{
	/*
	 * do we have any pre-allocated map-pages left?
	 */
	if (npvvapg) {
		vm_page_t m;

		/*
		 * we do this to keep recursion away
		 */
		pv_freelistcnt += PV_FREELIST_MIN;
		/*
		 * allocate a physical page out of the vm system
		 */
		m = vm_page_alloc(kernel_object,
		    pvva - vm_map_min(kernel_map), VM_ALLOC_INTERRUPT);
		if (m) {
			int newentries;
			int i;
			pv_entry_t entry;

			newentries = (NBPG / sizeof(struct pv_entry));
			/*
			 * wire the page
			 */
			vm_page_wire(m);
			m->flags &= ~PG_BUSY;
			/*
			 * let the kernel see it
			 */
			pmap_kenter(pvva, VM_PAGE_TO_PHYS(m));

			entry = (pv_entry_t) pvva;
			/*
			 * update the allocation pointers
			 */
			pvva += NBPG;
			--npvvapg;

			/*
			 * free the entries into the free list
			 */
			for (i = 0; i < newentries; i++) {
				free_pv_entry(entry);
				entry++;
			}
		}
		pv_freelistcnt -= PV_FREELIST_MIN;
	}
	if (!pv_freelist)
		panic("get_pv_entry: cannot get a pv_entry_t");
}
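/*
 * Rough numbers (assuming a 12-byte pv_entry of three 32-bit fields
 * and NBPG = 4096): each map-page yields NBPG / sizeof(struct pv_entry)
 * = 341 entries, and PV_FREELIST_MIN works out to 170, so the freelist
 * is topped up well before an interrupt-time burst can drain it.
 */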
/*
 * init the pv_entry allocation system
 */
#define PVSPERPAGE 64
void
init_pv_entries(npg)
	int npg;
{
	/*
	 * allocate enough kvm space for PVSPERPAGE entries per page (lots)
	 * kvm space is fairly cheap, be generous!!! (the system can panic if
	 * this is too small.)
	 */
	npvvapg = ((npg * PVSPERPAGE) * sizeof(struct pv_entry) + NBPG - 1) / NBPG;
	pvva = kmem_alloc_pageable(kernel_map, npvvapg * NBPG);
	/*
	 * get the first batch of entries
	 */
	free_pv_entry(get_pv_entry());
}

static pt_entry_t *
get_pt_entry(pmap)
	pmap_t pmap;
{
	vm_offset_t frame = (int) pmap->pm_pdir[PTDPTDI] & PG_FRAME;

	/* are we current address space or kernel? */
	if (pmap == kernel_pmap || frame == ((int) PTDpde & PG_FRAME)) {
		return PTmap;
	}
	/* otherwise, we are alternate address space */
	if (frame != ((int) APTDpde & PG_FRAME)) {
		APTDpde = pmap->pm_pdir[PTDPTDI];
		pmap_update();
	}
	return APTmap;
}

/*
 * If it is the first entry on the list, it is actually
 * in the header and we must copy the following entry up
 * to the header.  Otherwise we must search the list for
 * the entry.  In either case we free the now unused entry.
 */
void
pmap_remove_entry(pmap, pv, va)
	struct pmap *pmap;
	pv_entry_t pv;
	vm_offset_t va;
{
	pv_entry_t npv;
	int s;

	s = splhigh();
	if (pmap == pv->pv_pmap && va == pv->pv_va) {
		npv = pv->pv_next;
		if (npv) {
			*pv = *npv;
			free_pv_entry(npv);
		} else {
			pv->pv_pmap = NULL;
		}
	} else {
		for (npv = pv->pv_next; npv; npv = npv->pv_next) {
			if (pmap == npv->pv_pmap && va == npv->pv_va) {
				break;
			}
			pv = npv;
		}
		if (npv) {
			pv->pv_next = npv->pv_next;
			free_pv_entry(npv);
		}
	}
	splx(s);
}

/*
 * Remove the given range of addresses from the specified map.
 *
 * It is assumed that the start and end are properly
 * rounded to the page size.
 */
void
pmap_remove(pmap, sva, eva)
	struct pmap *pmap;
	register vm_offset_t sva;
	register vm_offset_t eva;
{
	register pt_entry_t *ptp, *ptq;
	vm_offset_t pa;
	register pv_entry_t pv;
	vm_offset_t va;
	vm_page_t m;
	pt_entry_t oldpte;

	if (pmap == NULL)
		return;

	ptp = get_pt_entry(pmap);

	/*
	 * special handling of removing one page.  a very
	 * common operation and easy to short circuit some
	 * code.
	 */
	if ((sva + NBPG) == eva) {

		if (*pmap_pde(pmap, sva) == 0)
			return;

		ptq = ptp + i386_btop(sva);

		if (!*ptq)
			return;
		/*
		 * Update statistics
		 */
		if (pmap_pte_w(ptq))
			pmap->pm_stats.wired_count--;
		pmap->pm_stats.resident_count--;

		pa = pmap_pte_pa(ptq);
		oldpte = *ptq;
		*ptq = 0;

		if (pmap_is_managed(pa)) {
			if ((int) oldpte & PG_M) {
				if ((sva < USRSTACK || sva >= KERNBASE) ||
				    (sva >= USRSTACK && sva < USRSTACK + (UPAGES * NBPG))) {
					if (sva < clean_sva || sva >= clean_eva) {
						PHYS_TO_VM_PAGE(pa)->dirty |= VM_PAGE_BITS_ALL;
					}
				}
			}
			pv = pa_to_pvh(pa);
			pmap_remove_entry(pmap, pv, sva);
		}
		pmap_unuse_pt(pmap, sva);
		pmap_update();
		return;
	}
	sva = i386_btop(sva);
	eva = i386_btop(eva);

	while (sva < eva) {
		/*
		 * Weed out invalid mappings. Note: we assume that the page
		 * directory table is always allocated, and in kernel virtual.
		 */

		if (*pmap_pde(pmap, i386_ptob(sva)) == 0) {
			/* We can race ahead here, straight to next pde.. */
			sva = ((sva + NPTEPG) & ~(NPTEPG - 1));
			continue;
		}
		ptq = ptp + sva;

		/*
		 * search for page table entries, use string operations that
		 * are much faster than explicitly scanning when page tables
		 * are not fully populated.
		 */
		if (*ptq == 0) {
			vm_offset_t pdnxt = ((sva + NPTEPG) & ~(NPTEPG - 1));
			vm_offset_t nscan = pdnxt - sva;
			int found = 0;

			if ((nscan + sva) > eva)
				nscan = eva - sva;

			asm("xorl %%eax,%%eax;cld;repe;scasl;jz 1f;incl %%eax;1:;" :
			    "=D"(ptq), "=a"(found) : "c"(nscan), "0"(ptq) : "cx");

			if (!found) {
				sva = pdnxt;
				continue;
			}
			ptq -= 1;

			sva = ptq - ptp;
		}
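		/*
		 * The inline asm above is, in effect (C sketch):
		 *	for (found = 0; nscan-- != 0; )
		 *		if (*ptq++ != 0) { found = 1; break; }
		 * "repe; scasl" compares %eax (zero) with successive longs
		 * and leaves %edi one past the last long examined, which is
		 * why ptq is backed up by one when a live pte is found.
		 */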
		/*
		 * Update statistics
		 */
		oldpte = *ptq;
		if (((int) oldpte) & PG_W)
			pmap->pm_stats.wired_count--;
		pmap->pm_stats.resident_count--;

		/*
		 * Invalidate the PTEs.  XXX: should cluster them up and
		 * invalidate as many as possible at once.
		 */
		*ptq = 0;

		va = i386_ptob(sva);

		/*
		 * Remove from the PV table (raise IPL since we may be called
		 * at interrupt time).
		 */
		pa = ((int) oldpte) & PG_FRAME;
		if (!pmap_is_managed(pa)) {
			pmap_unuse_pt(pmap, va);
			++sva;
			continue;
		}
		if ((int) oldpte & PG_M) {
			if ((va < USRSTACK || va >= KERNBASE) ||
			    (va >= USRSTACK && va < USRSTACK + (UPAGES * NBPG))) {
				if (va < clean_sva || va >= clean_eva) {
					PHYS_TO_VM_PAGE(pa)->dirty |= VM_PAGE_BITS_ALL;
				}
			}
		}
		pv = pa_to_pvh(pa);
		pmap_remove_entry(pmap, pv, va);
		pmap_unuse_pt(pmap, va);
		++sva;
	}
	pmap_update();
}

/*
 * Routine:	pmap_remove_all
 * Function:
 *	Removes this physical page from
 *	all physical maps in which it resides.
 *	Reflects back modify bits to the pager.
 *
 * Notes:
 *	Original versions of this routine were very
 *	inefficient because they iteratively called
 *	pmap_remove (slow...)
 */
void
pmap_remove_all(pa)
	vm_offset_t pa;
{
	register pv_entry_t pv, npv;
	register pt_entry_t *pte, *ptp;
	vm_offset_t va;
	struct pmap *pmap;
	vm_page_t m;
	int s;
	int anyvalid = 0;

	/*
	 * Not one of ours
	 */
	/*
	 * XXX this makes pmap_page_protect(NONE) illegal for non-managed
	 * pages!
	 */
	if (!pmap_is_managed(pa))
		return;

	pa = i386_trunc_page(pa);
	pv = pa_to_pvh(pa);
	m = PHYS_TO_VM_PAGE(pa);

	s = splhigh();
	while (pv->pv_pmap != NULL) {
		pmap = pv->pv_pmap;
		ptp = get_pt_entry(pmap);
		va = pv->pv_va;
		pte = ptp + i386_btop(va);
		if (pmap_pte_w(pte))
			pmap->pm_stats.wired_count--;
		if (*pte) {
			pmap->pm_stats.resident_count--;
			anyvalid++;

			/*
			 * Update the vm_page_t clean and reference bits.
			 */
			if ((int) *pte & PG_M) {
				if ((va < USRSTACK || va >= KERNBASE) ||
				    (va >= USRSTACK && va < USRSTACK + (UPAGES * NBPG))) {
					if (va < clean_sva || va >= clean_eva) {
						PHYS_TO_VM_PAGE(pa)->dirty |= VM_PAGE_BITS_ALL;
					}
				}
			}
			*pte = 0;
			pmap_unuse_pt(pmap, va);
		}
		npv = pv->pv_next;
		if (npv) {
			*pv = *npv;
			free_pv_entry(npv);
		} else {
			pv->pv_pmap = NULL;
		}
	}
	splx(s);
	if (anyvalid)
		pmap_update();
}
/*
 * Set the physical protection on the
 * specified range of this map as requested.
 */
void
pmap_protect(pmap, sva, eva, prot)
	register pmap_t pmap;
	vm_offset_t sva, eva;
	vm_prot_t prot;
{
	register pt_entry_t *pte;
	register vm_offset_t va;
	int i386prot;
	register pt_entry_t *ptp;
	int evap = i386_btop(eva);
	int anyvalid = 0;

	if (pmap == NULL)
		return;

	if ((prot & VM_PROT_READ) == VM_PROT_NONE) {
		pmap_remove(pmap, sva, eva);
		return;
	}
	if (prot & VM_PROT_WRITE)
		return;

	ptp = get_pt_entry(pmap);

	va = sva;
	while (va < eva) {
		int found = 0;
		int svap;
		vm_offset_t nscan;

		/*
		 * Page table page is not allocated.  Skip it, we don't want
		 * to force allocation of unnecessary PTE pages just to set
		 * the protection.
		 */
		if (!*pmap_pde(pmap, va)) {
			/* XXX: avoid address wrap around */
nextpde:
			if (va >= i386_trunc_pdr((vm_offset_t) - 1))
				break;
			va = i386_round_pdr(va + PAGE_SIZE);
			continue;
		}
		pte = ptp + i386_btop(va);

		if (*pte == 0) {
			/*
			 * scan for a non-empty pte
			 */
			svap = pte - ptp;
			nscan = ((svap + NPTEPG) & ~(NPTEPG - 1)) - svap;

			if (nscan + svap > evap)
				nscan = evap - svap;

			found = 0;
			if (nscan)
				asm("xorl %%eax,%%eax;cld;repe;scasl;jz 1f;incl %%eax;1:;" :
				    "=D"(pte), "=a"(found) : "c"(nscan), "0"(pte) : "cx");

			if (!found)
				goto nextpde;

			pte -= 1;
			svap = pte - ptp;

			va = i386_ptob(svap);
		}
		anyvalid++;

		i386prot = pte_prot(pmap, prot);
		if (va < UPT_MAX_ADDRESS) {
			i386prot |= PG_u;
			if (va >= UPT_MIN_ADDRESS)
				i386prot |= PG_RW;
		}
		pmap_pte_set_prot(pte, i386prot);
		va += PAGE_SIZE;
	}
	if (anyvalid)
		pmap_update();
}

/*
 * Insert the given physical page (p) at
 * the specified virtual address (v) in the
 * target physical map with the protection requested.
 *
 * If specified, the page will be wired down, meaning
 * that the related pte can not be reclaimed.
 *
 * NB:  This is the only routine which MAY NOT lazy-evaluate
 * or lose information.  That is, this routine must actually
 * insert this page into the given map NOW.
 */
void
pmap_enter(pmap, va, pa, prot, wired)
	register pmap_t pmap;
	vm_offset_t va;
	register vm_offset_t pa;
	vm_prot_t prot;
	boolean_t wired;
{
	register pt_entry_t *pte;
	register pt_entry_t npte;
	vm_offset_t opa;
	int ptevalid = 0;

	if (pmap == NULL)
		return;

	va = i386_trunc_page(va);
	pa = i386_trunc_page(pa);
	if (va > VM_MAX_KERNEL_ADDRESS)
		panic("pmap_enter: toobig");

	/*
	 * Page Directory table entry not valid, we need a new PT page
	 */
	if (*pmap_pde(pmap, va) == 0) {
		printf("kernel page directory invalid pdir=0x%x, va=0x%x\n", pmap->pm_pdir[PTDPTDI], va);
		panic("invalid kernel page directory");
	}
	pte = pmap_pte(pmap, va);
	opa = pmap_pte_pa(pte);

	/*
	 * Mapping has not changed, must be protection or wiring change.
	 */
	if (opa == pa) {
		/*
		 * Wiring change, just update stats.  We don't worry about
		 * wiring PT pages as they remain resident as long as there
		 * are valid mappings in them.  Hence, if a user page is
		 * wired, the PT page will be also.
		 */
		if (wired && !pmap_pte_w(pte))
			pmap->pm_stats.wired_count++;
		else if (!wired && pmap_pte_w(pte))
			pmap->pm_stats.wired_count--;

		goto validate;
	}
	/*
	 * Mapping has changed, invalidate old range and fall through to
	 * handle validating new mapping.
	 */
	if (opa) {
		pmap_remove(pmap, va, va + PAGE_SIZE);
	}
	/*
	 * Enter on the PV list if part of our managed memory.  Note that we
	 * raise IPL while manipulating pv_table since pmap_enter can be
	 * called at interrupt time.
	 */
	if (pmap_is_managed(pa)) {
		register pv_entry_t pv, npv;
		int s;

		pv = pa_to_pvh(pa);
		s = splhigh();
		/*
		 * No entries yet, use header as the first entry
		 */
		if (pv->pv_pmap == NULL) {
			pv->pv_va = va;
			pv->pv_pmap = pmap;
			pv->pv_next = NULL;
		}
		/*
		 * There is at least one other VA mapping this page.  Place
		 * this entry after the header.
		 */
		else {
			npv = get_pv_entry();
			npv->pv_va = va;
			npv->pv_pmap = pmap;
			npv->pv_next = pv->pv_next;
			pv->pv_next = npv;
		}
		splx(s);
	}

	/*
	 * Increment counters
	 */
	pmap->pm_stats.resident_count++;
	if (wired)
		pmap->pm_stats.wired_count++;

validate:
	/*
	 * Now validate mapping with desired protection/wiring.
	 */
	npte = (pt_entry_t) ((int) (pa | pte_prot(pmap, prot) | PG_V));

	/*
	 * When forking (copy-on-write, etc): A process will turn off write
	 * permissions for any of its writable pages.  If the data (object)
	 * is only referred to by one process, the process's map is modified
	 * directly as opposed to using the object manipulation routine.
	 * When using pmap_protect, the modified bits are not kept in the
	 * vm_page_t data structure.  Therefore, when using pmap_enter in
	 * vm_fault to bring back writability of a page, there has been no
	 * memory of the modified or referenced bits except at the pte
	 * level.  This clause supports the carryover of the modified and
	 * used (referenced) bits.
	 */
	if (pa == opa)
		(int) npte |= (int) *pte & (PG_M | PG_U);


	if (wired)
		(int) npte |= PG_W;
	if (va < UPT_MIN_ADDRESS)
		(int) npte |= PG_u;
	else if (va < UPT_MAX_ADDRESS)
		(int) npte |= PG_u | PG_RW;

	if (*pte != npte) {
		if (*pte)
			ptevalid++;
		*pte = npte;
	}
	if (ptevalid) {
		pmap_update();
	} else {
		pmap_use_pt(pmap, va);
	}
}

/*
 * Add a list of wired pages to the kva
 * this routine is only used for temporary
 * kernel mappings that do not need to have
 * page modification or references recorded.
 * Note that old mappings are simply written
 * over.  The page *must* be wired.
 */
void
pmap_qenter(va, m, count)
	vm_offset_t va;
	vm_page_t *m;
	int count;
{
	int i;
	int anyvalid = 0;
	register pt_entry_t *pte;

	for (i = 0; i < count; i++) {
		pte = vtopte(va + i * NBPG);
		if (*pte)
			anyvalid++;
		*pte = (pt_entry_t) ((int) (VM_PAGE_TO_PHYS(m[i]) | PG_RW | PG_V | PG_W));
	}
	if (anyvalid)
		pmap_update();
}
/*
 * this routine jerks page mappings from the
 * kernel -- it is meant only for temporary mappings.
 */
void
pmap_qremove(va, count)
	vm_offset_t va;
	int count;
{
	int i;
	register pt_entry_t *pte;

	for (i = 0; i < count; i++) {
		pte = vtopte(va + i * NBPG);
		*pte = 0;
	}
	pmap_update();
}
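/*
 * Usage sketch (kva and pages hypothetical): callers map a set of wired
 * pages into a reserved kva window, operate on them, and tear the
 * window down again:
 *	pmap_qenter(kva, pages, npages);
 *	... access the pages through kva ...
 *	pmap_qremove(kva, npages);
 * No pv entries are maintained for these transient kernel mappings.
 */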
/*
 * add a wired page to the kva
 * note that in order for the mapping to take effect -- you
 * should do a pmap_update after doing the pmap_kenter...
 */
void
pmap_kenter(va, pa)
	vm_offset_t va;
	register vm_offset_t pa;
{
	register pt_entry_t *pte;
	int wasvalid = 0;

	pte = vtopte(va);

	if (*pte)
		wasvalid++;

	*pte = (pt_entry_t) ((int) (pa | PG_RW | PG_V | PG_W));

	if (wasvalid)
		pmap_update();
}

/*
 * remove a page from the kernel pagetables
 */
void
pmap_kremove(va)
	vm_offset_t va;
{
	register pt_entry_t *pte;

	pte = vtopte(va);

	*pte = (pt_entry_t) 0;
	pmap_update();
}

/*
 * this code makes some *MAJOR* assumptions:
 * 1. Current pmap & pmap exists.
 * 2. Not wired.
 * 3. Read access.
 * 4. No page table pages.
 * 5. Tlbflush is deferred to calling procedure.
 * 6. Page IS managed.
 * but is *MUCH* faster than pmap_enter...
 */

static inline void
pmap_enter_quick(pmap, va, pa)
	register pmap_t pmap;
	vm_offset_t va;
	register vm_offset_t pa;
{
	register pt_entry_t *pte;
	register pv_entry_t pv, npv;
	int s;

	/*
	 * Enter on the PV list if part of our managed memory.  Note that we
	 * raise IPL while manipulating pv_table since pmap_enter can be
	 * called at interrupt time.
	 */

	pte = vtopte(va);

	/* a fault on the page table might occur here */
	if (*pte) {
		pmap_remove(pmap, va, va + PAGE_SIZE);
	}
	pv = pa_to_pvh(pa);
	s = splhigh();
	/*
	 * No entries yet, use header as the first entry
	 */
	if (pv->pv_pmap == NULL) {
		pv->pv_pmap = pmap;
		pv->pv_va = va;
		pv->pv_next = NULL;
	}
	/*
	 * There is at least one other VA mapping this page.  Place this
	 * entry after the header.
	 */
	else {
		npv = get_pv_entry();
		npv->pv_va = va;
		npv->pv_pmap = pmap;
		npv->pv_next = pv->pv_next;
		pv->pv_next = npv;
	}
	splx(s);

	/*
	 * Increment counters
	 */
	pmap->pm_stats.resident_count++;

	/*
	 * Now validate mapping with desired protection/wiring.
	 */
	*pte = (pt_entry_t) ((int) (pa | PG_V | PG_u));

	pmap_use_pt(pmap, va);

	return;
}

#define MAX_INIT_PT (1024*2048)
/*
 * pmap_object_init_pt preloads the ptes for a given object
 * into the specified pmap.  This eliminates the blast of soft
 * faults on process startup and immediately after an mmap.
 */
void
pmap_object_init_pt(pmap, addr, object, offset, size)
	pmap_t pmap;
	vm_offset_t addr;
	vm_object_t object;
	vm_offset_t offset;
	vm_offset_t size;
{
	vm_offset_t tmpoff;
	vm_page_t p;
	int bits;
	int objbytes;

	if (!pmap || ((size > MAX_INIT_PT) &&
		(object->resident_page_count > (MAX_INIT_PT / NBPG)))) {
		return;
	}
	if (!vm_object_lock_try(object))
		return;

	/*
	 * if we are processing a major portion of the object, then scan the
	 * entire thing.
	 */
	if (size > (object->size >> 2)) {
		objbytes = size;

		for (p = object->memq.tqh_first;
		    ((objbytes > 0) && (p != NULL));
		    p = p->listq.tqe_next) {

			tmpoff = p->offset;
			if (tmpoff < offset) {
				continue;
			}
			tmpoff -= offset;
			if (tmpoff >= size) {
				continue;
			}
			if (((p->flags & (PG_ACTIVE | PG_INACTIVE)) != 0) &&
			    ((p->valid & VM_PAGE_BITS_ALL) == VM_PAGE_BITS_ALL) &&
			    (p->bmapped == 0) &&
			    (p->busy == 0) &&
			    (p->flags & (PG_BUSY | PG_FICTITIOUS | PG_CACHE)) == 0) {
				vm_page_hold(p);
				p->flags |= PG_MAPPED;
				pmap_enter_quick(pmap, addr + tmpoff, VM_PAGE_TO_PHYS(p));
				vm_page_unhold(p);
			}
			objbytes -= NBPG;
		}
	} else {
		/*
		 * else lookup the pages one-by-one.
		 */
		for (tmpoff = 0; tmpoff < size; tmpoff += NBPG) {
			p = vm_page_lookup(object, tmpoff + offset);
			if (p && ((p->flags & (PG_ACTIVE | PG_INACTIVE)) != 0) &&
			    (p->bmapped == 0) && (p->busy == 0) &&
			    ((p->valid & VM_PAGE_BITS_ALL) == VM_PAGE_BITS_ALL) &&
			    (p->flags & (PG_BUSY | PG_FICTITIOUS | PG_CACHE)) == 0) {
				vm_page_hold(p);
				p->flags |= PG_MAPPED;
				pmap_enter_quick(pmap, addr + tmpoff, VM_PAGE_TO_PHYS(p));
				vm_page_unhold(p);
			}
		}
	}
	vm_object_unlock(object);
}

#if 0
/*
 * pmap_prefault provides a quick way of clustering
 * pagefaults into a process's address space.  It is a "cousin"
 * of pmap_object_init_pt, except it runs at page fault time instead
 * of mmap time.
 */
#define PFBAK 2
#define PFFOR 2
#define PAGEORDER_SIZE (PFBAK+PFFOR)

static int pmap_prefault_pageorder[] = {
	-NBPG, NBPG, -2 * NBPG, 2 * NBPG
};

void
pmap_prefault(pmap, addra, entry, object)
	pmap_t pmap;
	vm_offset_t addra;
	vm_map_entry_t entry;
	vm_object_t object;
{
	int i;
	vm_offset_t starta, enda;
	vm_offset_t offset, addr;
	vm_page_t m;
	int pageorder_index;

	if (entry->object.vm_object != object)
		return;

	if (pmap != &curproc->p_vmspace->vm_pmap)
		return;

	starta = addra - PFBAK * NBPG;
	if (starta < entry->start) {
		starta = entry->start;
	} else if (starta > addra)
		starta = 0;

	enda = addra + PFFOR * NBPG;
	if (enda > entry->end)
		enda = entry->end;

	for (i = 0; i < PAGEORDER_SIZE; i++) {
		vm_object_t lobject;
		pt_entry_t *pte;

		addr = addra + pmap_prefault_pageorder[i];
		if (addr < starta || addr >= enda)
			continue;

		pte = vtopte(addr);
		if (*pte)
			continue;

		offset = (addr - entry->start) + entry->offset;
		lobject = object;
		for (m = vm_page_lookup(lobject, offset);
		    (!m && lobject->shadow && !lobject->pager);
		    lobject = lobject->shadow) {

			offset += lobject->shadow_offset;
			m = vm_page_lookup(lobject->shadow, offset);
		}

		/*
		 * give-up when a page is not in memory
		 */
		if (m == NULL)
			break;

		if (((m->flags & (PG_CACHE | PG_ACTIVE | PG_INACTIVE)) != 0) &&
		    ((m->valid & VM_PAGE_BITS_ALL) == VM_PAGE_BITS_ALL) &&
		    (m->busy == 0) &&
		    (m->bmapped == 0) &&
		    (m->flags & (PG_BUSY | PG_FICTITIOUS)) == 0) {
			/*
			 * test results show that the system is faster when
			 * pages are activated.
			 */
			if ((m->flags & PG_ACTIVE) == 0) {
				if (m->flags & PG_CACHE)
					vm_page_deactivate(m);
				else
					vm_page_activate(m);
			}
			vm_page_hold(m);
			m->flags |= PG_MAPPED;
			pmap_enter_quick(pmap, addr, VM_PAGE_TO_PHYS(m));
			vm_page_unhold(m);
		}
	}
}
#endif

/*
 * Routine:	pmap_change_wiring
 * Function:	Change the wiring attribute for a map/virtual-address
 *		pair.
 * In/out conditions:
 *		The mapping must already exist in the pmap.
 */
void
pmap_change_wiring(pmap, va, wired)
	register pmap_t pmap;
	vm_offset_t va;
	boolean_t wired;
{
	register pt_entry_t *pte;

	if (pmap == NULL)
		return;

	pte = pmap_pte(pmap, va);

	if (wired && !pmap_pte_w(pte))
		pmap->pm_stats.wired_count++;
	else if (!wired && pmap_pte_w(pte))
		pmap->pm_stats.wired_count--;

	/*
	 * Wiring is not a hardware characteristic so there is no need to
	 * invalidate TLB.
	 */
	pmap_pte_set_w(pte, wired);
	/*
	 * When unwiring, set the modified bit in the pte -- could have been
	 * changed by the kernel
	 */
	if (!wired)
		(int) *pte |= PG_M;
}



/*
 * Copy the range specified by src_addr/len
 * from the source map to the range dst_addr/len
 * in the destination map.
 *
 * This routine is only advisory and need not do anything.
 */
void
pmap_copy(dst_pmap, src_pmap, dst_addr, len, src_addr)
	pmap_t dst_pmap, src_pmap;
	vm_offset_t dst_addr;
	vm_size_t len;
	vm_offset_t src_addr;
{
}

/*
 * Routine:	pmap_kernel
 * Function:
 *	Returns the physical map handle for the kernel.
 */
pmap_t
pmap_kernel()
{
	return (kernel_pmap);
}

/*
 * pmap_zero_page zeros the specified (machine independent)
 * page by mapping the page into virtual memory and using
 * bzero to clear its contents, one machine dependent page
 * at a time.
 */
void
pmap_zero_page(phys)
	vm_offset_t phys;
{
	if (*(int *) CMAP2)
		panic("pmap_zero_page: CMAP busy");

	*(int *) CMAP2 = PG_V | PG_KW | i386_trunc_page(phys);
	bzero(CADDR2, NBPG);

	*(int *) CMAP2 = 0;
	pmap_update();
}

/*
 * pmap_copy_page copies the specified (machine independent)
 * page by mapping the page into virtual memory and using
 * bcopy to copy the page, one machine dependent page at a
 * time.
 */
void
pmap_copy_page(src, dst)
	vm_offset_t src;
	vm_offset_t dst;
{
	if (*(int *) CMAP1 || *(int *) CMAP2)
		panic("pmap_copy_page: CMAP busy");

	*(int *) CMAP1 = PG_V | PG_KW | i386_trunc_page(src);
	*(int *) CMAP2 = PG_V | PG_KW | i386_trunc_page(dst);

#if __GNUC__ > 1
	memcpy(CADDR2, CADDR1, NBPG);
#else
	bcopy(CADDR1, CADDR2, NBPG);
#endif
	*(int *) CMAP1 = 0;
	*(int *) CMAP2 = 0;
	pmap_update();
}
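/*
 * CMAP1/CMAP2 are the one-page kernel windows reserved in
 * pmap_bootstrap: pointing a window at a physical page is a single pte
 * store, e.g.
 *	*(int *) CMAP2 = PG_V | PG_KW | i386_trunc_page(phys);
 * after which the page is addressable through CADDR2 until the pte is
 * cleared again.
 */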

/*
 * Routine:	pmap_pageable
 * Function:
 *	Make the specified pages (by pmap, offset)
 *	pageable (or not) as requested.
 *
 *	A page which is not pageable may not take
 *	a fault; therefore, its page table entry
 *	must remain valid for the duration.
 *
 *	This routine is merely advisory; pmap_enter
 *	will specify that these pages are to be wired
 *	down (or not) as appropriate.
 */
void
pmap_pageable(pmap, sva, eva, pageable)
	pmap_t pmap;
	vm_offset_t sva, eva;
	boolean_t pageable;
{
}

/*
 * this routine returns true if a physical page resides
 * in the given pmap.
 */
boolean_t
pmap_page_exists(pmap, pa)
	pmap_t pmap;
	vm_offset_t pa;
{
	register pv_entry_t pv;
	int s;

	if (!pmap_is_managed(pa))
		return FALSE;

	pv = pa_to_pvh(pa);
	s = splhigh();

	/*
	 * Check current mappings, returning immediately if found.
	 */
	if (pv->pv_pmap != NULL) {
		for (; pv; pv = pv->pv_next) {
			if (pv->pv_pmap == pmap) {
				splx(s);
				return TRUE;
			}
		}
	}
	splx(s);
	return (FALSE);
}

/*
 * pmap_testbit tests bits in ptes.
 * note that the testbit/changebit routines are inline,
 * so a lot of things compile-time evaluate.
 */
static __inline boolean_t
pmap_testbit(pa, bit)
	register vm_offset_t pa;
	int bit;
{
	register pv_entry_t pv;
	pt_entry_t *pte;
	int s;

	if (!pmap_is_managed(pa))
		return FALSE;

	pv = pa_to_pvh(pa);
	s = splhigh();

	/*
	 * Check current mappings, returning immediately if found.
	 */
	if (pv->pv_pmap != NULL) {
		for (; pv; pv = pv->pv_next) {
			/*
			 * if the bit being tested is the modified bit, then
			 * mark UPAGES as always modified, and ptes as never
			 * modified.
			 */
			if (bit & PG_U) {
				if ((pv->pv_va >= clean_sva) && (pv->pv_va < clean_eva)) {
					continue;
				}
			}
			if (bit & PG_M) {
				if (pv->pv_va >= USRSTACK) {
					if (pv->pv_va >= clean_sva && pv->pv_va < clean_eva) {
						continue;
					}
					if (pv->pv_va < USRSTACK + (UPAGES * NBPG)) {
						splx(s);
						return TRUE;
					} else if (pv->pv_va < KERNBASE) {
						splx(s);
						return FALSE;
					}
				}
			}
			if (!pv->pv_pmap) {
				printf("Null pmap (tb) at va: 0x%lx\n", pv->pv_va);
				continue;
			}
			pte = pmap_pte(pv->pv_pmap, pv->pv_va);
			if ((int) *pte & bit) {
				splx(s);
				return TRUE;
			}
		}
	}
	splx(s);
	return (FALSE);
}

/*
 * this routine is used to modify bits in ptes
 */
static __inline void
pmap_changebit(pa, bit, setem)
	vm_offset_t pa;
	int bit;
	boolean_t setem;
{
	register pv_entry_t pv;
	register pt_entry_t *pte, npte;
	vm_offset_t va;
	int s;

	if (!pmap_is_managed(pa))
		return;

	pv = pa_to_pvh(pa);
	s = splhigh();

	/*
	 * Loop over all current mappings, setting/clearing as appropriate.
	 * If setting RO, do we need to clear the VAC?
	 */
	if (pv->pv_pmap != NULL) {
		for (; pv; pv = pv->pv_next) {
			va = pv->pv_va;

			/*
			 * don't write protect pager mappings
			 */
			if (!setem && (bit == PG_RW)) {
				if (va >= clean_sva && va < clean_eva)
					continue;
			}
			if (!pv->pv_pmap) {
				printf("Null pmap (cb) at va: 0x%lx\n", va);
				continue;
			}
			pte = pmap_pte(pv->pv_pmap, va);
			if (setem)
				npte = (pt_entry_t) ((int) *pte | bit);
			else
				npte = (pt_entry_t) ((int) *pte & ~bit);
			*pte = npte;
		}
	}
	splx(s);
	pmap_update();
}
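
/*
 * The page attribute routines below reduce to pmap_testbit() and
 * pmap_changebit() calls.  For example (illustrative):
 *
 *	pmap_page_protect(pa, VM_PROT_READ);	clears PG_RW in every
 *						mapping of pa
 *	pmap_page_protect(pa, VM_PROT_NONE);	removes every mapping of pa
 *						via pmap_remove_all()
 */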

/*
 * pmap_page_protect:
 *
 *	Lower the permission for all mappings to a given page.
 */
void
pmap_page_protect(phys, prot)
	vm_offset_t phys;
	vm_prot_t prot;
{
	if ((prot & VM_PROT_WRITE) == 0) {
		if (prot & (VM_PROT_READ | VM_PROT_EXECUTE))
			pmap_changebit(phys, PG_RW, FALSE);
		else
			pmap_remove_all(phys);
	}
}

/*
 * pmap_phys_address:
 *
 *	Convert a page number to the physical address it represents.
 */
vm_offset_t
pmap_phys_address(ppn)
	int ppn;
{
	return (i386_ptob(ppn));
}

/*
 * pmap_is_referenced:
 *
 *	Return whether or not the specified physical page was referenced
 *	by any physical maps.
 */
boolean_t
pmap_is_referenced(vm_offset_t pa)
{
	return pmap_testbit(pa, PG_U);
}

/*
 * pmap_is_modified:
 *
 *	Return whether or not the specified physical page was modified
 *	in any physical maps.
 */
boolean_t
pmap_is_modified(vm_offset_t pa)
{
	return pmap_testbit(pa, PG_M);
}

/*
 * Clear the modify bits on the specified physical page.
 */
void
pmap_clear_modify(vm_offset_t pa)
{
	pmap_changebit(pa, PG_M, FALSE);
}

/*
 * pmap_clear_reference:
 *
 *	Clear the reference bit on the specified physical page.
 */
void
pmap_clear_reference(vm_offset_t pa)
{
	pmap_changebit(pa, PG_U, FALSE);
}

/*
 * Routine:	pmap_copy_on_write
 * Function:
 *	Remove write privileges from all
 *	physical maps for this physical page.
 */
void
pmap_copy_on_write(vm_offset_t pa)
{
	pmap_changebit(pa, PG_RW, FALSE);
}

/*
 * Miscellaneous support routines follow
 */

void
i386_protection_init()
{
	register int *kp, prot;

	kp = protection_codes;
	for (prot = 0; prot < 8; prot++) {
		switch (prot) {
		case VM_PROT_NONE | VM_PROT_NONE | VM_PROT_NONE:
			/*
			 * Read access is also 0.  There isn't any execute bit,
			 * so just make it readable.
			 */
		case VM_PROT_READ | VM_PROT_NONE | VM_PROT_NONE:
		case VM_PROT_READ | VM_PROT_NONE | VM_PROT_EXECUTE:
		case VM_PROT_NONE | VM_PROT_NONE | VM_PROT_EXECUTE:
			*kp++ = 0;
			break;
		case VM_PROT_NONE | VM_PROT_WRITE | VM_PROT_NONE:
		case VM_PROT_NONE | VM_PROT_WRITE | VM_PROT_EXECUTE:
		case VM_PROT_READ | VM_PROT_WRITE | VM_PROT_NONE:
		case VM_PROT_READ | VM_PROT_WRITE | VM_PROT_EXECUTE:
			*kp++ = PG_RW;
			break;
		}
	}
}
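
/*
 * Illustrative use of the table built above (pte_bits is a hypothetical
 * variable): a VM_PROT_* combination indexes protection_codes[] directly
 * to obtain the hardware pte bits, e.g.
 *
 *	pte_bits = protection_codes[VM_PROT_READ | VM_PROT_WRITE];
 *
 * yields PG_RW, while any combination without VM_PROT_WRITE yields 0
 * (read-only), since the i386 has no separate execute permission.
 */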

/*
 * Map a set of physical memory pages into the kernel virtual
 * address space.  Return a pointer to where it is mapped.  This
 * routine is intended to be used for mapping device memory,
 * NOT real memory.  The non-cacheable bits are set on each
 * mapped page.
 */
void *
pmap_mapdev(pa, size)
	vm_offset_t pa;
	vm_size_t size;
{
	vm_offset_t va, tmpva;
	pt_entry_t *pte;

	pa = trunc_page(pa);
	size = roundup(size, PAGE_SIZE);

	va = kmem_alloc_pageable(kernel_map, size);
	if (!va)
		panic("pmap_mapdev: Couldn't alloc kernel virtual memory");

	for (tmpva = va; size > 0;) {
		pte = vtopte(tmpva);
		*pte = (pt_entry_t) ((int) (pa | PG_RW | PG_V | PG_N));
		size -= PAGE_SIZE;
		tmpva += PAGE_SIZE;
		pa += PAGE_SIZE;
	}
	pmap_update();

	return ((void *) va);
}

#ifdef DEBUG
/* print address space of pmap */
void
pads(pm)
	pmap_t pm;
{
	unsigned va, i, j;
	pt_entry_t *ptep;

	if (pm == kernel_pmap)
		return;
	for (i = 0; i < 1024; i++)
		if (pm->pm_pdir[i])
			for (j = 0; j < 1024; j++) {
				va = (i << PD_SHIFT) + (j << PG_SHIFT);
				if (pm == kernel_pmap && va < KERNBASE)
					continue;
				if (pm != kernel_pmap && va > UPT_MAX_ADDRESS)
					continue;
				ptep = pmap_pte(pm, va);
				if (pmap_pte_v(ptep))
					printf("%x:%x ", va, *(int *) ptep);
			}
}

void
pmap_pvdump(pa)
	vm_offset_t pa;
{
	register pv_entry_t pv;

	printf("pa %x", pa);
	for (pv = pa_to_pvh(pa); pv; pv = pv->pv_next) {
#ifdef used_to_be
		printf(" -> pmap %x, va %x, flags %x",
		    pv->pv_pmap, pv->pv_va, pv->pv_flags);
#endif
		printf(" -> pmap %x, va %x",
		    pv->pv_pmap, pv->pv_va);
		pads(pv->pv_pmap);
	}
	printf("\n");
}
#endif
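
/*
 * Illustrative use of pmap_mapdev() (hypothetical driver code, not part
 * of this file):
 *
 *	void *regs;
 *
 *	regs = pmap_mapdev(0xa0000, 64 * 1024);
 *
 * The physical address is truncated to a page boundary and the size is
 * rounded up to a multiple of PAGE_SIZE; the pages come back mapped
 * non-cacheable (PG_N), as device memory requires.
 */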