/*-
 * SPDX-License-Identifier: BSD-3-Clause
 *
 * Copyright (c) 1991 Regents of the University of California.
 * All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * the Systems Programming Group of the University of Utah Computer
 * Science Department and William Jolitz of UUNET Technologies Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * Derived from hp300 version by Mike Hibler, this version by William
 * Jolitz uses a recursive map [a pde points to the page directory] to
 * map the page tables using the page tables themselves.  This is done to
 * reduce the impact on kernel virtual memory for lots of sparse address
 * space, and to reduce the cost of memory to each process.
 *
 *	from: hp300: @(#)pmap.h	7.2 (Berkeley) 12/16/90
 *	from: @(#)pmap.h	7.4 (Berkeley) 5/12/91
 *	from: src/sys/i386/include/pmap.h,v 1.65.2.2 2000/11/30 01:54:42 peter
 *	JNPR: pmap.h,v 1.7.2.1 2007/09/10 07:44:12 girish
 * $FreeBSD$
 */

#ifndef _MACHINE_PMAP_H_
#define	_MACHINE_PMAP_H_

#include <vm/vm_param.h>
#include <machine/pte.h>

#if defined(__mips_n32) || defined(__mips_n64) /* PHYSADDR_64BIT */
#define	NKPT		256	/* mem > 4G, vm_page_startup needs more KPTs */
#else
#define	NKPT		120	/* actual number of kernel page tables */
#endif

#ifndef LOCORE

#include <sys/queue.h>
#include <sys/_cpuset.h>
#include <sys/_lock.h>
#include <sys/_mutex.h>

/*
 * Pmap stuff
 */
struct pv_entry;
struct pv_chunk;

struct md_page {
	int		pv_flags;
	TAILQ_HEAD(, pv_entry) pv_list;
};

#define	PV_TABLE_REF		0x02	/* referenced */
#define	PV_MEMATTR_MASK		0xf0	/* store vm_memattr_t here */
#define	PV_MEMATTR_SHIFT	0x04

#define	ASID_BITS		8
#define	ASIDGEN_BITS		(32 - ASID_BITS)
#define	ASIDGEN_MASK		((1 << ASIDGEN_BITS) - 1)

struct pmap {
	pd_entry_t	*pm_segtab;	/* KVA of segment table */
	TAILQ_HEAD(, pv_chunk) pm_pvchunk; /* list of mappings in pmap */
	cpuset_t	pm_active;	/* active on cpus */
	struct {
		u_int32_t asid:ASID_BITS;	/* TLB address space tag */
		u_int32_t gen:ASIDGEN_BITS;	/* its generation number */
	} pm_asid[MAXSMPCPU];
	struct pmap_statistics pm_stats;	/* pmap statistics */
	struct mtx	pm_mtx;
};

typedef struct pmap *pmap_t;

#ifdef _KERNEL

pt_entry_t *pmap_pte(pmap_t, vm_offset_t);
vm_paddr_t pmap_kextract(vm_offset_t va);

#define	vtophys(va)	pmap_kextract(((vm_offset_t)(va)))
#define	pmap_asid(pmap)	(pmap)->pm_asid[PCPU_GET(cpuid)].asid

extern struct pmap kernel_pmap_store;
#define	kernel_pmap	(&kernel_pmap_store)

#define	PMAP_LOCK(pmap)		mtx_lock(&(pmap)->pm_mtx)
#define	PMAP_LOCK_ASSERT(pmap, type)	mtx_assert(&(pmap)->pm_mtx, (type))
#define	PMAP_LOCK_DESTROY(pmap)	mtx_destroy(&(pmap)->pm_mtx)
#define	PMAP_LOCK_INIT(pmap)	mtx_init(&(pmap)->pm_mtx, "pmap", \
				    NULL, MTX_DEF)
#define	PMAP_LOCKED(pmap)	mtx_owned(&(pmap)->pm_mtx)
#define	PMAP_MTX(pmap)		(&(pmap)->pm_mtx)
#define	PMAP_TRYLOCK(pmap)	mtx_trylock(&(pmap)->pm_mtx)
#define	PMAP_UNLOCK(pmap)	mtx_unlock(&(pmap)->pm_mtx)
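
/*
 * Usage sketch (illustrative only, assuming a user pmap with a valid
 * mapping at "va"): pm_mtx serializes access to a pmap's internals,
 * so PTE lookups are normally bracketed by the macros above.  The
 * pte_test()/PTE_V helpers referenced here come from <machine/pte.h>.
 *
 *	pt_entry_t *pte;
 *
 *	PMAP_LOCK(pmap);
 *	pte = pmap_pte(pmap, va);
 *	if (pte != NULL && pte_test(pte, PTE_V)) {
 *		... examine or modify the valid mapping ...
 *	}
 *	PMAP_UNLOCK(pmap);
 */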

/*
 * For each vm_page_t, there is a list of all currently valid virtual
 * mappings of that page.  An entry is a pv_entry_t, the list is pv_table.
 */
typedef struct pv_entry {
	vm_offset_t	pv_va;	/* virtual address for mapping */
	TAILQ_ENTRY(pv_entry) pv_list;
} *pv_entry_t;

/*
 * pv_entries are allocated in chunks per-process.  This avoids the
 * need to track per-pmap assignments.
 */
#ifdef __mips_n64
#define	_NPCM	3
#define	_NPCPV	168
#else
#define	_NPCM	11
#define	_NPCPV	336
#endif
struct pv_chunk {
	pmap_t			pc_pmap;
	TAILQ_ENTRY(pv_chunk)	pc_list;
	u_long			pc_map[_NPCM];	/* bitmap; 1 = free */
	TAILQ_ENTRY(pv_chunk)	pc_lru;
	struct pv_entry		pc_pventry[_NPCPV];
};
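
/*
 * Sizing check for the constants above (a sketch, assuming 4 KB pages
 * and the usual ABI type sizes): a pv_chunk is laid out to fill
 * exactly one page, and pc_map carries at least one bit per pv_entry.
 *
 *	n64:	header 8 + 16 + 3*8 + 16 = 64 bytes, plus
 *		168 * sizeof(struct pv_entry) = 168 * 24 = 4032 bytes,
 *		for 4096 total; bitmap 3 * 64 = 192 bits >= 168.
 *	o32/n32: header 4 + 8 + 11*4 + 8 = 64 bytes, plus
 *		336 * 12 = 4032 bytes, for 4096 total;
 *		bitmap 11 * 32 = 352 bits >= 336.
 */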

/*
 * physmem_desc[] is a superset of phys_avail[] and describes all the
 * memory present in the system.
 *
 * phys_avail[] is similar but does not include the memory stolen by
 * pmap_steal_memory().
 *
 * Each memory region is described by a pair of elements in the array
 * so we can describe up to (PHYS_AVAIL_ENTRIES / 2) distinct memory
 * regions.
 */
extern vm_paddr_t physmem_desc[PHYS_AVAIL_COUNT];

extern vm_offset_t virtual_avail;
extern vm_offset_t virtual_end;

#define	pmap_page_get_memattr(m) (((m)->md.pv_flags & PV_MEMATTR_MASK) >> PV_MEMATTR_SHIFT)
#define	pmap_page_is_mapped(m)	(!TAILQ_EMPTY(&(m)->md.pv_list))
#define	pmap_page_is_write_mapped(m)	(((m)->a.flags & PGA_WRITEABLE) != 0)

void pmap_bootstrap(void);
void *pmap_mapdev(vm_paddr_t, vm_size_t);
void *pmap_mapdev_attr(vm_paddr_t, vm_size_t, vm_memattr_t);
void pmap_unmapdev(vm_offset_t, vm_size_t);
vm_offset_t pmap_steal_memory(vm_size_t size);
void pmap_kenter(vm_offset_t va, vm_paddr_t pa);
void pmap_kenter_attr(vm_offset_t va, vm_paddr_t pa, vm_memattr_t attr);
void pmap_kenter_device(vm_offset_t, vm_size_t, vm_paddr_t);
void pmap_kremove(vm_offset_t va);
void pmap_kremove_device(vm_offset_t, vm_size_t);
void *pmap_kenter_temporary(vm_paddr_t pa, int i);
void pmap_kenter_temporary_free(vm_paddr_t pa);
void pmap_flush_pvcache(vm_page_t m);
int pmap_emulate_modified(pmap_t pmap, vm_offset_t va);
void pmap_page_set_memattr(vm_page_t, vm_memattr_t);
int pmap_change_attr(vm_offset_t, vm_size_t, vm_memattr_t);

static inline int
pmap_vmspace_copy(pmap_t dst_pmap __unused, pmap_t src_pmap __unused)
{

	return (0);
}

#endif /* _KERNEL */

#endif /* !LOCORE */

#endif /* !_MACHINE_PMAP_H_ */