/* xenpmap.h, revision 183927
 *
 * Copyright (c) 2004 Christian Limpach.
 * Copyright (c) 2004,2005 Kip Macy
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *      This product includes software developed by Christian Limpach.
 * 4. The name of the author may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
31 */ 32 33 34#ifndef _XEN_XENPMAP_H_ 35#define _XEN_XENPMAP_H_ 36void _xen_queue_pt_update(vm_paddr_t, vm_paddr_t, char *, int); 37void xen_pt_switch(vm_paddr_t); 38void xen_set_ldt(vm_paddr_t, unsigned long); 39void xen_pgdpt_pin(vm_paddr_t); 40void xen_pgd_pin(vm_paddr_t); 41void xen_pgd_unpin(vm_paddr_t); 42void xen_pt_pin(vm_paddr_t); 43void xen_pt_unpin(vm_paddr_t); 44void xen_flush_queue(void); 45void xen_check_queue(void); 46#if 0 47void pmap_ref(pt_entry_t *pte, vm_paddr_t ma); 48#endif 49 50#ifdef INVARIANTS 51#define xen_queue_pt_update(a, b) _xen_queue_pt_update((a), (b), __FILE__, __LINE__) 52#else 53#define xen_queue_pt_update(a, b) _xen_queue_pt_update((a), (b), NULL, 0) 54#endif 55 56 57#include <sys/param.h> 58#include <sys/pcpu.h> 59 60#ifdef PMAP_DEBUG 61#define PMAP_REF pmap_ref 62#define PMAP_DEC_REF_PAGE pmap_dec_ref_page 63#define PMAP_MARK_PRIV pmap_mark_privileged 64#define PMAP_MARK_UNPRIV pmap_mark_unprivileged 65#else 66#define PMAP_MARK_PRIV(a) 67#define PMAP_MARK_UNPRIV(a) 68#define PMAP_REF(a, b) 69#define PMAP_DEC_REF_PAGE(a) 70#endif 71 72#define ALWAYS_SYNC 0 73 74#ifdef PT_DEBUG 75#define PT_LOG() printk("WP PT_SET %s:%d\n", __FILE__, __LINE__) 76#else 77#define PT_LOG() 78#endif 79 80#define INVALID_P2M_ENTRY (~0UL) 81 82#define pmap_valid_entry(E) ((E) & PG_V) /* is PDE or PTE valid? 
*/ 83 84#define SH_PD_SET_VA 1 85#define SH_PD_SET_VA_MA 2 86#define SH_PD_SET_VA_CLEAR 3 87 88struct pmap; 89void pd_set(struct pmap *pmap, int ptepindex, vm_paddr_t val, int type); 90#ifdef notyet 91static vm_paddr_t 92vptetomachpte(vm_paddr_t *pte) 93{ 94 vm_offset_t offset, ppte; 95 vm_paddr_t pgoffset, retval, *pdir_shadow_ptr; 96 int pgindex; 97 98 ppte = (vm_offset_t)pte; 99 pgoffset = (ppte & PAGE_MASK); 100 offset = ppte - (vm_offset_t)PTmap; 101 pgindex = ppte >> PDRSHIFT; 102 103 pdir_shadow_ptr = (vm_paddr_t *)PCPU_GET(pdir_shadow); 104 retval = (pdir_shadow_ptr[pgindex] & ~PAGE_MASK) + pgoffset; 105 return (retval); 106} 107#endif 108#define PT_GET(_ptp) \ 109 (pmap_valid_entry(*(_ptp)) ? xpmap_mtop(*(_ptp)) : (0)) 110 111#ifdef WRITABLE_PAGETABLES 112 113#define PT_SET_VA(_ptp,_npte,sync) do { \ 114 PMAP_REF((_ptp), xpmap_ptom(_npte)); \ 115 PT_LOG(); \ 116 *(_ptp) = xpmap_ptom((_npte)); \ 117} while (/*CONSTCOND*/0) 118#define PT_SET_VA_MA(_ptp,_npte,sync) do { \ 119 PMAP_REF((_ptp), (_npte)); \ 120 PT_LOG(); \ 121 *(_ptp) = (_npte); \ 122} while (/*CONSTCOND*/0) 123#define PT_CLEAR_VA(_ptp, sync) do { \ 124 PMAP_REF((pt_entry_t *)(_ptp), 0); \ 125 PT_LOG(); \ 126 *(_ptp) = 0; \ 127} while (/*CONSTCOND*/0) 128 129#define PD_SET_VA(_pmap, _ptp, _npte, sync) do { \ 130 PMAP_REF((_ptp), xpmap_ptom(_npte)); \ 131 pd_set((_pmap),(_ptp),(_npte), SH_PD_SET_VA); \ 132 if (sync || ALWAYS_SYNC) xen_flush_queue(); \ 133} while (/*CONSTCOND*/0) 134#define PD_SET_VA_MA(_pmap, _ptp, _npte, sync) do { \ 135 PMAP_REF((_ptp), (_npte)); \ 136 pd_set((_pmap),(_ptp),(_npte), SH_PD_SET_VA_MA); \ 137 if (sync || ALWAYS_SYNC) xen_flush_queue(); \ 138} while (/*CONSTCOND*/0) 139#define PD_CLEAR_VA(_pmap, _ptp, sync) do { \ 140 PMAP_REF((pt_entry_t *)(_ptp), 0); \ 141 pd_set((_pmap),(_ptp), 0, SH_PD_SET_VA_CLEAR); \ 142 if (sync || ALWAYS_SYNC) xen_flush_queue(); \ 143} while (/*CONSTCOND*/0) 144 145#else /* !WRITABLE_PAGETABLES */ 146 147#define PT_SET_VA(_ptp,_npte,sync) 
do { \ 148 PMAP_REF((_ptp), xpmap_ptom(_npte)); \ 149 xen_queue_pt_update(vtomach(_ptp), \ 150 xpmap_ptom(_npte)); \ 151 if (sync || ALWAYS_SYNC) xen_flush_queue(); \ 152} while (/*CONSTCOND*/0) 153#define PT_SET_VA_MA(_ptp,_npte,sync) do { \ 154 PMAP_REF((_ptp), (_npte)); \ 155 xen_queue_pt_update(vtomach(_ptp), _npte); \ 156 if (sync || ALWAYS_SYNC) xen_flush_queue(); \ 157} while (/*CONSTCOND*/0) 158#define PT_CLEAR_VA(_ptp, sync) do { \ 159 PMAP_REF((pt_entry_t *)(_ptp), 0); \ 160 xen_queue_pt_update(vtomach(_ptp), 0); \ 161 if (sync || ALWAYS_SYNC) \ 162 xen_flush_queue(); \ 163} while (/*CONSTCOND*/0) 164 165#define PD_SET_VA(_pmap, _ptepindex,_npte,sync) do { \ 166 PMAP_REF((_ptp), xpmap_ptom(_npte)); \ 167 pd_set((_pmap),(_ptepindex),(_npte), SH_PD_SET_VA); \ 168 if (sync || ALWAYS_SYNC) xen_flush_queue(); \ 169} while (/*CONSTCOND*/0) 170#define PD_SET_VA_MA(_pmap, _ptepindex,_npte,sync) do { \ 171 PMAP_REF((_ptp), (_npte)); \ 172 pd_set((_pmap),(_ptepindex),(_npte), SH_PD_SET_VA_MA); \ 173 if (sync || ALWAYS_SYNC) xen_flush_queue(); \ 174} while (/*CONSTCOND*/0) 175#define PD_CLEAR_VA(_pmap, _ptepindex, sync) do { \ 176 PMAP_REF((pt_entry_t *)(_ptp), 0); \ 177 pd_set((_pmap),(_ptepindex), 0, SH_PD_SET_VA_CLEAR); \ 178 if (sync || ALWAYS_SYNC) xen_flush_queue(); \ 179} while (/*CONSTCOND*/0) 180 181#endif 182 183#define PT_SET_MA(_va, _ma) \ 184do { \ 185 PANIC_IF(HYPERVISOR_update_va_mapping(((unsigned long)(_va)),\ 186 (_ma), \ 187 UVMF_INVLPG| UVMF_ALL) < 0); \ 188} while (/*CONSTCOND*/0) 189 190#define PT_UPDATES_FLUSH() do { \ 191 xen_flush_queue(); \ 192} while (/*CONSTCOND*/0) 193 194static __inline vm_paddr_t 195xpmap_mtop(vm_paddr_t mpa) 196{ 197 vm_paddr_t tmp = (mpa & PG_FRAME); 198 199 return machtophys(tmp) | (mpa & ~PG_FRAME); 200} 201 202static __inline vm_paddr_t 203xpmap_ptom(vm_paddr_t ppa) 204{ 205 vm_paddr_t tmp = (ppa & PG_FRAME); 206 207 return phystomach(tmp) | (ppa & ~PG_FRAME); 208} 209 210static __inline void 
211set_phys_to_machine(unsigned long pfn, unsigned long mfn) 212{ 213#ifdef notyet 214 PANIC_IF(max_mapnr && pfn >= max_mapnr); 215#endif 216 if (xen_feature(XENFEAT_auto_translated_physmap)) { 217#ifdef notyet 218 PANIC_IF((pfn != mfn && mfn != INVALID_P2M_ENTRY)); 219#endif 220 return; 221 } 222 xen_phys_machine[pfn] = mfn; 223} 224 225 226 227 228#endif /* _XEN_XENPMAP_H_ */ 229