pmap.c (336523) vs. pmap.c (337262)
Deleted lines are prefixed with '-', added lines with '+'; unchanged lines are shown once, with their line numbers in pmap.c (337262).
1/*-
2 * Copyright (c) 1991 Regents of the University of California.
3 * All rights reserved.
4 * Copyright (c) 1994 John S. Dyson
5 * All rights reserved.
6 * Copyright (c) 1994 David Greenman
7 * All rights reserved.
8 * Copyright (c) 2003 Peter Wemm

--- 71 unchanged lines hidden (view full) ---

80 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
81 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
82 * SUCH DAMAGE.
83 */
84
85#define AMD64_NPT_AWARE
86
87#include <sys/cdefs.h>
- 88__FBSDID("$FreeBSD: stable/11/sys/amd64/amd64/pmap.c 336523 2018-07-19 22:53:23Z markj $");
+ 88__FBSDID("$FreeBSD: stable/11/sys/amd64/amd64/pmap.c 337262 2018-08-03 15:42:39Z markj $");
89
90/*
91 * Manages physical address maps.
92 *
93 * Since the information managed by this module is
94 * also stored by the logical address mapping module,
95 * this module may throw away valid virtual-to-physical
96 * mappings at almost any time. However, invalidations

--- 271 unchanged lines hidden (view full) ---

368static u_int64_t KPDphys; /* phys addr of kernel level 2 */
369u_int64_t KPDPphys; /* phys addr of kernel level 3 */
370u_int64_t KPML4phys; /* phys addr of kernel level 4 */
371
372static u_int64_t DMPDphys; /* phys addr of direct mapped level 2 */
373static u_int64_t DMPDPphys; /* phys addr of direct mapped level 3 */
374static int ndmpdpphys; /* number of DMPDPphys pages */
375
+ 376static vm_paddr_t KERNend; /* phys addr of end of bootstrap data */
+ 377
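Note: KERNend is introduced in this revision to record the physical address at which the kernel's bootstrap data ends; the later hunks in this diff set it at the top of pmap_bootstrap() and consult it in pmap_init().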
378/*
379 * pmap_mapdev support pre initialization (i.e. console)
380 */
381#define PMAP_PREINIT_MAPPING_COUNT 8
382static struct pmap_preinit_mapping {
383 vm_paddr_t pa;
384 vm_offset_t va;
385 vm_size_t sz;

--- 549 unchanged lines hidden (view full) ---

935 /* Now map the page tables at their location within PTmap */
936 pd_p = (pd_entry_t *)KPDphys;
937 for (i = 0; i < nkpt; i++)
938 pd_p[i] = (KPTphys + ptoa(i)) | X86_PG_RW | X86_PG_V;
939
940 /* Map from zero to end of allocations under 2M pages */
941 /* This replaces some of the KPTphys entries above */
942 for (i = 0; (i << PDRSHIFT) < *firstaddr; i++)
- 941 pd_p[i] = (i << PDRSHIFT) | X86_PG_RW | X86_PG_V | PG_PS |
- 942 pg_g;
+ 943 /* Preset PG_M and PG_A because demotion expects it. */
+ 944 pd_p[i] = (i << PDRSHIFT) | X86_PG_RW | X86_PG_V | PG_PS |
+ 945 X86_PG_M | X86_PG_A | pg_g;
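The functional change in this hunk is that the bootstrap 2MB kernel mappings now carry X86_PG_A and X86_PG_M from the start, because (per the added comment) demotion of those mappings expects the accessed and dirty bits to be present. Below is a minimal standalone sketch of the entry values; the numeric bit values are the architectural x86-64 ones and the sketch itself is illustrative, not code from pmap.c:

#include <stdint.h>
#include <stdio.h>

/* Architectural x86-64 paging bits, assumed to match the X86_PG_* macros. */
#define PG_V     0x001ULL   /* present */
#define PG_RW    0x002ULL   /* writable */
#define PG_A     0x020ULL   /* accessed */
#define PG_M     0x040ULL   /* dirty */
#define PG_PS    0x080ULL   /* 2MB superpage */
#define PDRSHIFT 21         /* log2(2MB) */

int main(void)
{
        uint64_t pg_g = 0;      /* pmap_bootstrap() leaves pg_g at 0 under PTI */

        for (uint64_t i = 0; i < 3; i++) {
                /* before: identity-map the i-th 2MB block, A/M left clear */
                uint64_t oldpde = (i << PDRSHIFT) | PG_RW | PG_V | PG_PS | pg_g;
                /* after: the same mapping with accessed and dirty bits preset */
                uint64_t newpde = oldpde | PG_M | PG_A;
                printf("i=%llu  old=%#011llx  new=%#011llx\n",
                    (unsigned long long)i, (unsigned long long)oldpde,
                    (unsigned long long)newpde);
        }
        return (0);
}

Presumably the demotion path copies these PDE attribute bits into the replacement 4KB page-table entries and expects kernel superpage mappings to already carry them, which is why they are preset here rather than left for the MMU to set on first access.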
946
947 /*
948 * Because we map the physical blocks in 2M pages, adjust firstaddr
949 * to record the physical blocks we've actually mapped into kernel
950 * virtual address space.
951 */
952 *firstaddr = round_2mpage(*firstaddr);
953
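For reference, round_2mpage() rounds an address up to the next 2MB boundary, so *firstaddr ends up covering whole 2MB-mapped blocks. A tiny standalone sketch of that arithmetic; the helper body and sample address are assumptions for illustration, not copied from the kernel headers:

#include <stdint.h>
#include <stdio.h>

#define NBPDR   (1UL << 21)     /* bytes mapped by one PDE: 2MB */
#define PDRMASK (NBPDR - 1)

/* assumed equivalent of round_2mpage(): round up to a 2MB multiple */
static uint64_t
round_2mpage_sketch(uint64_t x)
{
        return ((x + PDRMASK) & ~PDRMASK);
}

int main(void)
{
        uint64_t firstaddr = 0x01234567;        /* hypothetical end of allocations */

        printf("%#llx -> %#llx\n", (unsigned long long)firstaddr,
            (unsigned long long)round_2mpage_sketch(firstaddr));    /* 0x1400000 */
        return (0);
}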

--- 59 unchanged lines hidden (view full) ---

1013 */
1014void
1015pmap_bootstrap(vm_paddr_t *firstaddr)
1016{
1017 vm_offset_t va;
1018 pt_entry_t *pte;
1019 int i;
1020
+ 1021 KERNend = *firstaddr;
+ 1022
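Note: KERNend is captured from *firstaddr on entry to pmap_bootstrap(), before create_pagetables() (called a few lines below) rounds *firstaddr up to a 2MB boundary, so KERNend marks the unrounded end of the bootstrap data rather than the end of the region that ends up mapped with 2MB pages.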
1023 if (!pti)
1024 pg_g = X86_PG_G;
1025
1026 /*
1027 * Create an initial set of page tables to run the kernel in.
1028 */
1029 create_pagetables(firstaddr);
1030

--- 216 unchanged lines hidden (view full) ---

1247 }
1248 }
1249 }
1250
1251 /*
1252 * Initialize the vm page array entries for the kernel pmap's
1253 * page table pages.
1254 */
+ 1255 PMAP_LOCK(kernel_pmap);
1256 for (i = 0; i < nkpt; i++) {
1257 mpte = PHYS_TO_VM_PAGE(KPTphys + (i << PAGE_SHIFT));
1258 KASSERT(mpte >= vm_page_array &&
1259 mpte < &vm_page_array[vm_page_array_size],
1260 ("pmap_init: page table page is out of range"));
1261 mpte->pindex = pmap_pde_pindex(KERNBASE) + i;
1262 mpte->phys_addr = KPTphys + (i << PAGE_SHIFT);
1263 mpte->wire_count = 1;
+ 1264 if (i << PDRSHIFT < KERNend &&
+ 1265 pmap_insert_pt_page(kernel_pmap, mpte))
+ 1266 panic("pmap_init: pmap_insert_pt_page failed");
1267 }
+ 1268 PMAP_UNLOCK(kernel_pmap);
1269 atomic_add_int(&vm_cnt.v_wire_count, nkpt);
1270
1271 /*
1272 * If the kernel is running on a virtual machine, then it must assume
1273 * that MCA is enabled by the hypervisor. Moreover, the kernel must
1274 * be prepared for the hypervisor changing the vendor and family that
1275 * are reported by CPUID. Consequently, the workaround for AMD Family
1276 * 10h Erratum 383 is enabled if the processor's feature set does not

--- 6877 unchanged lines hidden ---
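Taken together, the additions make pmap_init() register the kernel page-table pages whose 2MB block begins below KERNend (i.e. the pages that map the bootstrap kernel data) with the kernel pmap via pmap_insert_pt_page(); the new PMAP_LOCK/PMAP_UNLOCK pair around the loop is presumably needed because that insertion modifies the kernel pmap's page-table-page lookup structure. Together with presetting PG_M/PG_A on the bootstrap 2MB mappings, this appears aimed at letting those initial superpage mappings be demoted later, reusing the already-existing page-table pages; that reading is an inference from the diff, not from the commit message. A small standalone sketch of the new selection test, with made-up values for nkpt and KERNend:

#include <stdint.h>
#include <stdio.h>

#define PDRSHIFT 21     /* one PDE maps 2MB */

int main(void)
{
        int nkpt = 32;                          /* hypothetical */
        uint64_t KERNend = 0x3400000;           /* hypothetical: 52MB of bootstrap data */
        int inserted = 0;

        /* mirrors the new test: only PT pages covering [0, KERNend) are inserted */
        for (int i = 0; i < nkpt; i++)
                if (((uint64_t)i << PDRSHIFT) < KERNend)
                        inserted++;
        printf("%d of %d kernel PT pages would be inserted\n", inserted, nkpt);
        return (0);
}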