1/* $NetBSD: pmap_private.h,v 1.5 2023/10/04 20:28:06 ad Exp $ */ 2 3/* 4 * Copyright (c) 1997 Charles D. Cranor and Washington University. 5 * All rights reserved. 6 * 7 * Redistribution and use in source and binary forms, with or without 8 * modification, are permitted provided that the following conditions 9 * are met: 10 * 1. Redistributions of source code must retain the above copyright 11 * notice, this list of conditions and the following disclaimer. 12 * 2. Redistributions in binary form must reproduce the above copyright 13 * notice, this list of conditions and the following disclaimer in the 14 * documentation and/or other materials provided with the distribution. 15 * 16 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR 17 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES 18 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 19 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, 20 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT 21 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, 22 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY 23 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 24 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF 25 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 26 */ 27 28/* 29 * Copyright (c) 2001 Wasabi Systems, Inc. 30 * All rights reserved. 31 * 32 * Written by Frank van der Linden for Wasabi Systems, Inc. 33 * 34 * Redistribution and use in source and binary forms, with or without 35 * modification, are permitted provided that the following conditions 36 * are met: 37 * 1. Redistributions of source code must retain the above copyright 38 * notice, this list of conditions and the following disclaimer. 39 * 2. 
Redistributions in binary form must reproduce the above copyright 40 * notice, this list of conditions and the following disclaimer in the 41 * documentation and/or other materials provided with the distribution. 42 * 3. All advertising materials mentioning features or use of this software 43 * must display the following acknowledgement: 44 * This product includes software developed for the NetBSD Project by 45 * Wasabi Systems, Inc. 46 * 4. The name of Wasabi Systems, Inc. may not be used to endorse 47 * or promote products derived from this software without specific prior 48 * written permission. 49 * 50 * THIS SOFTWARE IS PROVIDED BY WASABI SYSTEMS, INC. ``AS IS'' AND 51 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED 52 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR 53 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL WASABI SYSTEMS, INC 54 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR 55 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF 56 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS 57 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN 58 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) 59 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE 60 * POSSIBILITY OF SUCH DAMAGE. 61 */ 62 63#ifndef _X86_PMAP_PRIVATE_H_ 64#define _X86_PMAP_PRIVATE_H_ 65 66#ifndef _MACHINE_PMAP_PRIVATE_H_X86 67#error Include machine/pmap_private.h, not x86/pmap_private.h. 
#endif

#ifdef _KERNEL_OPT
#include "opt_svs.h"
#endif

#include <sys/param.h>
#include <sys/types.h>

#include <sys/kcpuset.h>
#include <sys/mutex.h>
#include <sys/pool.h>
#include <sys/queue.h>
#include <sys/rwlock.h>

#include <machine/cpufunc.h>
#include <machine/pte.h>
#include <machine/vmparam.h>

#include <uvm/uvm_object.h>
#include <uvm/uvm_pmap.h>

struct pmap;

/*
 * Indices into slotspace.area[] below: one entry per named region
 * ("slot area") of the top-level page directory.
 */
#define SLAREA_USER	0	/* user address space */
#define SLAREA_PTE	1	/* recursive PTE mapping */
#define SLAREA_MAIN	2	/* main kernel area */
#define SLAREA_PCPU	3	/* per-CPU area (see struct pcpu_area) */
#define SLAREA_DMAP	4	/* direct map */
#define SLAREA_HYPV	5	/* hypervisor */
#define SLAREA_ASAN	6	/* KASAN shadow */
#define SLAREA_MSAN	7	/* KMSAN shadow */
#define SLAREA_KERN	8	/* kernel image */
#define SLSPACE_NAREAS	9	/* number of areas */

/*
 * Carve-up of the top-level page directory slots.  Each area records
 * its starting slot, its size in slots, and whether it is currently
 * in use.  slotspace_rand() (declared below) hands out randomized
 * placements within this space.
 */
struct slotspace {
	struct {
		size_t sslot;	/* start slot */
		size_t nslot;	/* # of slots */
		bool active;	/* area is active */
	} area[SLSPACE_NAREAS];
};

extern struct slotspace slotspace;

#include <x86/gdt.h>

/*
 * Per-CPU descriptor tables and stacks, mapped contiguously in the
 * pcpu area.  Kept __packed so the in-memory layout is exactly the
 * sum of the members with no padding.
 */
struct pcpu_entry {
	uint8_t gdt[MAXGDTSIZ];		/* GDT */
	uint8_t ldt[MAX_USERLDT_SIZE];	/* per-process LDT */
	uint8_t idt[PAGE_SIZE];		/* IDT */
	uint8_t tss[PAGE_SIZE];		/* TSS */
	uint8_t ist0[PAGE_SIZE];	/* interrupt stacks (IST0..IST3) */
	uint8_t ist1[PAGE_SIZE];
	uint8_t ist2[PAGE_SIZE];
	uint8_t ist3[PAGE_SIZE];
	uint8_t rsp0[2 * PAGE_SIZE];	/* ring-0 stack */
} __packed;

/*
 * The whole pcpu area: one shared LDT page plus one pcpu_entry per
 * possible CPU.  The SVS-only utls page sits first when compiled in.
 */
struct pcpu_area {
#ifdef SVS
	uint8_t utls[PAGE_SIZE];	/* present only with options SVS */
#endif
	uint8_t ldt[PAGE_SIZE];
	struct pcpu_entry ent[MAXCPUS];
} __packed;

extern struct pcpu_area *pcpuarea;

/* PCID (TLB process-context identifier) values for the two halves. */
#define PMAP_PCID_KERN	0
#define PMAP_PCID_USER	1

/*
 * pmap data structures: see pmap.c for details of locking.
 */

/*
 * we maintain a list of all non-kernel pmaps
 */

LIST_HEAD(pmap_head, pmap); /* struct pmap_head: head of a pmap list */

/*
 * linked list of all non-kernel pmaps
 */
extern struct pmap_head pmaps;
extern kmutex_t pmaps_lock;	/* protects pmaps */

/*
 * pool_cache(9) that pmaps are allocated from
 */
extern struct pool_cache pmap_cache;

/*
 * the pmap structure
 *
 * note that the pm_obj contains the lock pointer, the reference count,
 * page list, and number of PTPs within the pmap.
 *
 * pm_lock is the same as the lock for vm object 0.  Changes to
 * the other objects may only be made if that lock has been taken
 * (the other object locks are only used when uvm_pagealloc is called)
 */

struct pv_page;

struct pmap {
	/* one uvm_object per page-table level >= 1 */
	struct uvm_object pm_obj[PTP_LEVELS-1];	/* objects for lvl >= 1 */
	LIST_ENTRY(pmap) pm_list;	/* list of all pmaps */
	pd_entry_t *pm_pdir;		/* VA of PD */
	paddr_t pm_pdirpa[PDP_SIZE];	/* PA of PDs (read-only after create) */
	struct vm_page *pm_ptphint[PTP_LEVELS-1];
					/* pointer to a PTP in our pmap */
	struct pmap_statistics pm_stats;  /* pmap stats */
	struct pv_entry *pm_pve;	/* spare pv_entry */

	/* pv_page freelists, partitioned by how full each page is */
	LIST_HEAD(, pv_page) pm_pvp_part;
	LIST_HEAD(, pv_page) pm_pvp_empty;
	LIST_HEAD(, pv_page) pm_pvp_full;

#if !defined(__x86_64__)
	vaddr_t pm_hiexec;		/* highest executable mapping */
#endif /* !defined(__x86_64__) */

	union descriptor *pm_ldt;	/* user-set LDT */
	size_t pm_ldt_len;		/* XXX unused, remove */
	int pm_ldt_sel;			/* LDT selector */

	kcpuset_t *pm_cpus;		/* mask of CPUs using pmap */
	kcpuset_t *pm_kernel_cpus;	/* mask of CPUs using kernel part
					   of pmap */
	kcpuset_t *pm_xen_ptp_cpus;	/* mask of CPUs which have this
					   pmap's ptp mapped */
	long pm_pctr;			/* for assertions */
	LIST_HEAD(,vm_page) pm_gc_ptp;	/* PTPs queued for free */

	/*
	 * Used by NVMM and Xen: per-pmap overrides of the core pmap
	 * operations, so guest/PV pmaps can substitute their own
	 * page-table walkers (e.g. EPT -- see pmap_ept_transform()).
	 */
	int (*pm_enter)(struct pmap *, vaddr_t, paddr_t, vm_prot_t, u_int);
	bool (*pm_extract)(struct pmap *, vaddr_t, paddr_t *);
	void (*pm_remove)(struct pmap *, vaddr_t, vaddr_t);
	int (*pm_sync_pv)(struct vm_page *, vaddr_t, paddr_t, int, uint8_t *,
	    pt_entry_t *);
	void (*pm_pp_remove_ent)(struct pmap *, struct vm_page *, pt_entry_t,
	    vaddr_t);
	void (*pm_write_protect)(struct pmap *, vaddr_t, vaddr_t, vm_prot_t);
	void (*pm_unwire)(struct pmap *, vaddr_t);

	void (*pm_tlb_flush)(struct pmap *);
	void *pm_data;

	kmutex_t pm_lock		/* locks for pm_objs */
	    __aligned(64);		/* give lock own cache line */
	krwlock_t pm_dummy_lock;	/* ugly hack for abusing uvm_object */
};

/* macro to access pm_pdirpa slots */
#ifdef PAE
/* PAE: 4 L2 PDs; select the right one via l2tol3(), offset via l2tol2() */
#define pmap_pdirpa(pmap, index) \
	((pmap)->pm_pdirpa[l2tol3(index)] + l2tol2(index) * sizeof(pd_entry_t))
#else
#define pmap_pdirpa(pmap, index) \
	((pmap)->pm_pdirpa[0] + (index) * sizeof(pd_entry_t))
#endif

/*
 * global kernel variables
 */

/*
 * PDPpaddr is the physical address of the kernel's PDP.
 * - i386 non-PAE and amd64: PDPpaddr corresponds directly to the %cr3
 *   value associated to the kernel process, proc0.
 * - i386 PAE: it still represents the PA of the kernel's PDP (L2).  Due to
 *   the L3 PD, it cannot be considered as the equivalent of a %cr3 any more.
 * - Xen: it corresponds to the PFN of the kernel's PDP.
 */
extern u_long PDPpaddr;

extern pd_entry_t pmap_pg_g;	/* do we support PTE_G? */
extern pd_entry_t pmap_pg_nx;	/* do we support PTE_NX? */
extern int pmap_largepages;
extern long nkptp[PTP_LEVELS];

#define pmap_valid_entry(E)	((E) & PTE_P)	/* is PDE or PTE valid?
 */

/* Map/unmap another pmap's page tables into the current context. */
void pmap_map_ptes(struct pmap *, struct pmap **, pd_entry_t **,
    pd_entry_t * const **);
void pmap_unmap_ptes(struct pmap *, struct pmap *);

/* Walk the PDEs covering a VA; *lastlvl gets the deepest valid level. */
bool pmap_pdes_valid(vaddr_t, pd_entry_t * const *, pd_entry_t *,
    int *lastlvl);

bool pmap_is_curpmap(struct pmap *);

/* Switch a pmap's operation vector to the NVMM EPT variants. */
void pmap_ept_transform(struct pmap *);

#ifndef __HAVE_DIRECT_MAP
void pmap_vpage_cpu_init(struct cpu_info *);
#endif
/* Randomized placement of a slot area (see struct slotspace above). */
vaddr_t slotspace_rand(int, size_t, size_t, size_t, vaddr_t);

vaddr_t reserve_dumppages(vaddr_t); /* XXX: not a pmap fn */

/*
 * Reason codes for TLB shootdowns, named after their callsites;
 * used to attribute shootdown activity (e.g. for event counters).
 */
typedef enum tlbwhy {
	TLBSHOOT_REMOVE_ALL,
	TLBSHOOT_KENTER,
	TLBSHOOT_KREMOVE,
	TLBSHOOT_FREE_PTP,
	TLBSHOOT_REMOVE_PTE,
	TLBSHOOT_SYNC_PV,
	TLBSHOOT_WRITE_PROTECT,
	TLBSHOOT_ENTER,
	TLBSHOOT_NVMM,
	TLBSHOOT_BUS_DMA,
	TLBSHOOT_BUS_SPACE,
	TLBSHOOT__MAX,
} tlbwhy_t;

void pmap_tlb_init(void);
void pmap_tlb_cpu_init(struct cpu_info *);
void pmap_tlb_shootdown(pmap_t, vaddr_t, pt_entry_t, tlbwhy_t);
void pmap_tlb_shootnow(void);
void pmap_tlb_intr(void);

/*
 * inline functions
 */

/*
 * pmap_update_pg: flush one page from the TLB (or flush the whole thing
 * if hardware doesn't support one-page flushing)
 */

__inline static void __unused
pmap_update_pg(vaddr_t va)
{
	invlpg(va);
}

/*
 * various address inlines
 *
 *  vtopte: return a pointer to the PTE mapping a VA, works only for
 *  user and PT addresses
 *
 *  kvtopte: return a pointer to the PTE mapping a kernel VA
 */

#include <lib/libkern/libkern.h>

static __inline pt_entry_t * __unused
vtopte(vaddr_t va)
{

	/* only valid for user / PT addresses -- kernel VAs use kvtopte() */
	KASSERT(va < VM_MIN_KERNEL_ADDRESS);

	/* index into the recursive PTE mapping */
	return (PTE_BASE + pl1_i(va));
}

static __inline pt_entry_t * __unused
kvtopte(vaddr_t va)
{
	pd_entry_t *pde;

	KASSERT(va >= VM_MIN_KERNEL_ADDRESS);

	pde = L2_BASE + pl2_i(va);
	if (*pde & PTE_PS)
		/* large page: there is no L1 PTE, return the PDE itself */
		return ((pt_entry_t *)pde);

	return (PTE_BASE + pl1_i(va));
}

#ifdef XENPV
#include <sys/bitops.h>

#define XPTE_MASK	L1_FRAME
/* Selects the index of a PTE in (A)PTE_BASE */
#define XPTE_SHIFT	(L1_SHIFT - ilog2(sizeof(pt_entry_t)))

/* PTE access inline functions */

/*
 * Get the machine address of the pointed pte
 * We use hardware MMU to get value so works only for levels 1-3
 */

static __inline paddr_t
xpmap_ptetomach(pt_entry_t *pte)
{
	pt_entry_t *up_pte;
	vaddr_t va = (vaddr_t) pte;

	/* locate the PTE that maps 'pte' itself via the recursive mapping */
	va = ((va & XPTE_MASK) >> XPTE_SHIFT) | (vaddr_t) PTE_BASE;
	up_pte = (pt_entry_t *) va;

	/* machine frame of the page holding 'pte', plus its page offset */
	return (paddr_t) (((*up_pte) & PTE_FRAME) + (((vaddr_t) pte) & (~PTE_FRAME & ~VA_SIGN_MASK)));
}

/* Xen helpers to change bits of a pte */
#define XPMAP_UPDATE_DIRECT	1	/* Update direct map entry flags too */

paddr_t	vtomach(vaddr_t);
#define vtomfn(va) (vtomach(va) >> PAGE_SHIFT)
#endif	/* XENPV */

#ifdef __HAVE_PCPU_AREA
extern struct pcpu_area *pcpuarea;
#define PDIR_SLOT_PCPU		510
#define PMAP_PCPU_BASE	(VA_SIGN_NEG((PDIR_SLOT_PCPU * NBPD_L4)))
#endif

void	svs_quad_copy(void *, void *, long);

#ifdef _KERNEL_OPT
#include "opt_efi.h"
#endif

#ifdef EFI_RUNTIME
void *	pmap_activate_sync(struct pmap *);
void	pmap_deactivate_sync(struct pmap *, void *);
bool	pmap_is_user(struct pmap *);
#else
/*
 * Without EFI runtime services the kernel pmap is never passed here
 * (asserted below), so every caller's pmap is a user pmap.
 */
static inline bool
pmap_is_user(struct pmap *pmap)
{

	KASSERT(pmap != pmap_kernel());
	return true;
}
#endif

#endif	/* _X86_PMAP_PRIVATE_H_ */