/*
 * xenpmap.h, revision 225736
 *
 * Copyright (c) 2004 Christian Limpach.
 * Copyright (c) 2004,2005 Kip Macy
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by Christian Limpach.
 * 4. The name of the author may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
31 */ 32 33 34#ifndef _XEN_XENPMAP_H_ 35#define _XEN_XENPMAP_H_ 36 37#include <machine/xen/features.h> 38 39void _xen_queue_pt_update(vm_paddr_t, vm_paddr_t, char *, int); 40void xen_pt_switch(vm_paddr_t); 41void xen_set_ldt(vm_paddr_t, unsigned long); 42void xen_pgdpt_pin(vm_paddr_t); 43void xen_pgd_pin(vm_paddr_t); 44void xen_pgd_unpin(vm_paddr_t); 45void xen_pt_pin(vm_paddr_t); 46void xen_pt_unpin(vm_paddr_t); 47void xen_flush_queue(void); 48void xen_check_queue(void); 49#if 0 50void pmap_ref(pt_entry_t *pte, vm_paddr_t ma); 51#endif 52 53#ifdef INVARIANTS 54#define xen_queue_pt_update(a, b) _xen_queue_pt_update((a), (b), __FILE__, __LINE__) 55#else 56#define xen_queue_pt_update(a, b) _xen_queue_pt_update((a), (b), NULL, 0) 57#endif 58 59#ifdef PMAP_DEBUG 60#define PMAP_REF pmap_ref 61#define PMAP_DEC_REF_PAGE pmap_dec_ref_page 62#define PMAP_MARK_PRIV pmap_mark_privileged 63#define PMAP_MARK_UNPRIV pmap_mark_unprivileged 64#else 65#define PMAP_MARK_PRIV(a) 66#define PMAP_MARK_UNPRIV(a) 67#define PMAP_REF(a, b) 68#define PMAP_DEC_REF_PAGE(a) 69#endif 70 71#define ALWAYS_SYNC 0 72 73#ifdef PT_DEBUG 74#define PT_LOG() printk("WP PT_SET %s:%d\n", __FILE__, __LINE__) 75#else 76#define PT_LOG() 77#endif 78 79#define INVALID_P2M_ENTRY (~0UL) 80 81#define pmap_valid_entry(E) ((E) & PG_V) /* is PDE or PTE valid? 
*/ 82 83#define SH_PD_SET_VA 1 84#define SH_PD_SET_VA_MA 2 85#define SH_PD_SET_VA_CLEAR 3 86 87struct pmap; 88void pd_set(struct pmap *pmap, int ptepindex, vm_paddr_t val, int type); 89#ifdef notyet 90static vm_paddr_t 91vptetomachpte(vm_paddr_t *pte) 92{ 93 vm_offset_t offset, ppte; 94 vm_paddr_t pgoffset, retval, *pdir_shadow_ptr; 95 int pgindex; 96 97 ppte = (vm_offset_t)pte; 98 pgoffset = (ppte & PAGE_MASK); 99 offset = ppte - (vm_offset_t)PTmap; 100 pgindex = ppte >> PDRSHIFT; 101 102 pdir_shadow_ptr = (vm_paddr_t *)PCPU_GET(pdir_shadow); 103 retval = (pdir_shadow_ptr[pgindex] & ~PAGE_MASK) + pgoffset; 104 return (retval); 105} 106#endif 107#define PT_GET(_ptp) \ 108 (pmap_valid_entry(*(_ptp)) ? xpmap_mtop(*(_ptp)) : (0)) 109 110#ifdef WRITABLE_PAGETABLES 111 112#define PT_SET_VA(_ptp,_npte,sync) do { \ 113 PMAP_REF((_ptp), xpmap_ptom(_npte)); \ 114 PT_LOG(); \ 115 *(_ptp) = xpmap_ptom((_npte)); \ 116} while (/*CONSTCOND*/0) 117#define PT_SET_VA_MA(_ptp,_npte,sync) do { \ 118 PMAP_REF((_ptp), (_npte)); \ 119 PT_LOG(); \ 120 *(_ptp) = (_npte); \ 121} while (/*CONSTCOND*/0) 122#define PT_CLEAR_VA(_ptp, sync) do { \ 123 PMAP_REF((pt_entry_t *)(_ptp), 0); \ 124 PT_LOG(); \ 125 *(_ptp) = 0; \ 126} while (/*CONSTCOND*/0) 127 128#define PD_SET_VA(_pmap, _ptp, _npte, sync) do { \ 129 PMAP_REF((_ptp), xpmap_ptom(_npte)); \ 130 pd_set((_pmap),(_ptp),(_npte), SH_PD_SET_VA); \ 131 if (sync || ALWAYS_SYNC) xen_flush_queue(); \ 132} while (/*CONSTCOND*/0) 133#define PD_SET_VA_MA(_pmap, _ptp, _npte, sync) do { \ 134 PMAP_REF((_ptp), (_npte)); \ 135 pd_set((_pmap),(_ptp),(_npte), SH_PD_SET_VA_MA); \ 136 if (sync || ALWAYS_SYNC) xen_flush_queue(); \ 137} while (/*CONSTCOND*/0) 138#define PD_CLEAR_VA(_pmap, _ptp, sync) do { \ 139 PMAP_REF((pt_entry_t *)(_ptp), 0); \ 140 pd_set((_pmap),(_ptp), 0, SH_PD_SET_VA_CLEAR); \ 141 if (sync || ALWAYS_SYNC) xen_flush_queue(); \ 142} while (/*CONSTCOND*/0) 143 144#else /* !WRITABLE_PAGETABLES */ 145 146#define PT_SET_VA(_ptp,_npte,sync) do 
{ \ 147 PMAP_REF((_ptp), xpmap_ptom(_npte)); \ 148 xen_queue_pt_update(vtomach(_ptp), \ 149 xpmap_ptom(_npte)); \ 150 if (sync || ALWAYS_SYNC) xen_flush_queue(); \ 151} while (/*CONSTCOND*/0) 152#define PT_SET_VA_MA(_ptp,_npte,sync) do { \ 153 PMAP_REF((_ptp), (_npte)); \ 154 xen_queue_pt_update(vtomach(_ptp), _npte); \ 155 if (sync || ALWAYS_SYNC) xen_flush_queue(); \ 156} while (/*CONSTCOND*/0) 157#define PT_CLEAR_VA(_ptp, sync) do { \ 158 PMAP_REF((pt_entry_t *)(_ptp), 0); \ 159 xen_queue_pt_update(vtomach(_ptp), 0); \ 160 if (sync || ALWAYS_SYNC) \ 161 xen_flush_queue(); \ 162} while (/*CONSTCOND*/0) 163 164#define PD_SET_VA(_pmap, _ptepindex,_npte,sync) do { \ 165 PMAP_REF((_ptp), xpmap_ptom(_npte)); \ 166 pd_set((_pmap),(_ptepindex),(_npte), SH_PD_SET_VA); \ 167 if (sync || ALWAYS_SYNC) xen_flush_queue(); \ 168} while (/*CONSTCOND*/0) 169#define PD_SET_VA_MA(_pmap, _ptepindex,_npte,sync) do { \ 170 PMAP_REF((_ptp), (_npte)); \ 171 pd_set((_pmap),(_ptepindex),(_npte), SH_PD_SET_VA_MA); \ 172 if (sync || ALWAYS_SYNC) xen_flush_queue(); \ 173} while (/*CONSTCOND*/0) 174#define PD_CLEAR_VA(_pmap, _ptepindex, sync) do { \ 175 PMAP_REF((pt_entry_t *)(_ptp), 0); \ 176 pd_set((_pmap),(_ptepindex), 0, SH_PD_SET_VA_CLEAR); \ 177 if (sync || ALWAYS_SYNC) xen_flush_queue(); \ 178} while (/*CONSTCOND*/0) 179 180#endif 181 182#define PT_SET_MA(_va, _ma) \ 183do { \ 184 PANIC_IF(HYPERVISOR_update_va_mapping(((unsigned long)(_va)),\ 185 (_ma), \ 186 UVMF_INVLPG| UVMF_ALL) < 0); \ 187} while (/*CONSTCOND*/0) 188 189#define PT_UPDATES_FLUSH() do { \ 190 xen_flush_queue(); \ 191} while (/*CONSTCOND*/0) 192 193static __inline vm_paddr_t 194xpmap_mtop(vm_paddr_t mpa) 195{ 196 vm_paddr_t tmp = (mpa & PG_FRAME); 197 198 return machtophys(tmp) | (mpa & ~PG_FRAME); 199} 200 201static __inline vm_paddr_t 202xpmap_ptom(vm_paddr_t ppa) 203{ 204 vm_paddr_t tmp = (ppa & PG_FRAME); 205 206 return phystomach(tmp) | (ppa & ~PG_FRAME); 207} 208 209static __inline void 
210set_phys_to_machine(unsigned long pfn, unsigned long mfn) 211{ 212#ifdef notyet 213 PANIC_IF(max_mapnr && pfn >= max_mapnr); 214#endif 215 if (xen_feature(XENFEAT_auto_translated_physmap)) { 216#ifdef notyet 217 PANIC_IF((pfn != mfn && mfn != INVALID_P2M_ENTRY)); 218#endif 219 return; 220 } 221 xen_phys_machine[pfn] = mfn; 222} 223 224 225 226 227#endif /* _XEN_XENPMAP_H_ */ 228