/*	$NetBSD: xenpmap.h,v 1.33.2.1 2012/05/09 03:22:52 riz Exp $	*/

/*
 *
 * Copyright (c) 2004 Christian Limpach.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */


#ifndef _XEN_XENPMAP_H_
#define _XEN_XENPMAP_H_

#ifdef _KERNEL_OPT
#include "opt_xen.h"
#endif

#include <sys/types.h>
#include <sys/kcpuset.h>

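/*
 * Value stored in the phys-to-machine (P2M) table for a PFN that has no
 * machine frame backing it (e.g. a page handed back to the hypervisor).
 */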
#define	INVALID_P2M_ENTRY	(~0UL)

void xpq_queue_machphys_update(paddr_t, paddr_t);
void xpq_queue_invlpg(vaddr_t);
void xpq_queue_pte_update(paddr_t, pt_entry_t);
void xpq_queue_pt_switch(paddr_t);
void xpq_flush_queue(void);
void xpq_queue_set_ldt(vaddr_t, uint32_t);
void xpq_queue_tlb_flush(void);
void xpq_queue_pin_table(paddr_t, int);
void xpq_queue_unpin_table(paddr_t);
int  xpq_update_foreign(paddr_t, pt_entry_t, int);
void xen_vcpu_mcast_invlpg(vaddr_t, vaddr_t, kcpuset_t *);
void xen_vcpu_bcast_invlpg(vaddr_t, vaddr_t);
void xen_mcast_tlbflush(kcpuset_t *);
void xen_bcast_tlbflush(void);
void xen_mcast_invlpg(vaddr_t, kcpuset_t *);
void xen_bcast_invlpg(vaddr_t);
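
/*
 * The xpq_queue_*() functions batch MMU requests and only hand them to
 * the hypervisor when the queue is flushed.  A rough usage sketch (call
 * sites and locking live in the pmap implementation; the variable names
 * below are made up for illustration):
 *
 *	xpq_queue_pte_update(pte_ma, npte);	// machine address of the PTE
 *	xpq_queue_invlpg(va);			// shoot down the stale mapping
 *	xpq_flush_queue();			// submit the queued requests
 */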

void pmap_xen_resume(void);
void pmap_xen_suspend(void);
void pmap_map_recursive_entries(void);
void pmap_unmap_recursive_entries(void);

#if defined(PAE) || defined(__x86_64__)
void xen_kpm_sync(struct pmap *, int);
#endif /* PAE || __x86_64__ */

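/*
 * Convenience wrappers around xpq_queue_pin_table(): pinning asks the
 * hypervisor to validate the page as a page table of the given level and
 * to keep it typed as such until it is unpinned.
 */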
#define xpq_queue_pin_l1_table(pa)	\
	xpq_queue_pin_table(pa, MMUEXT_PIN_L1_TABLE)
#define xpq_queue_pin_l2_table(pa)	\
	xpq_queue_pin_table(pa, MMUEXT_PIN_L2_TABLE)
#define xpq_queue_pin_l3_table(pa)	\
	xpq_queue_pin_table(pa, MMUEXT_PIN_L3_TABLE)
#define xpq_queue_pin_l4_table(pa)	\
	xpq_queue_pin_table(pa, MMUEXT_PIN_L4_TABLE)

extern unsigned long *xpmap_phys_to_machine_mapping;

/*
 * On Xen-2, start-of-day virtual memory begins at KERNTEXTOFF
 * (0xc0100000); on Xen-3 it begins at KERNBASE (0xc0000000) for domain0.
 * The offset between physical and virtual addresses therefore differs
 * between Xen-2 and Xen-3 domain0 kernels.  Starting with Xen 3.0.2, we
 * can add notes so that virtual memory starts at KERNBASE for domU as
 * well.
 */
#if defined(DOM0OPS) || !defined(XEN_COMPAT_030001)
#define XPMAP_OFFSET	0
#else
#define	XPMAP_OFFSET	(KERNTEXTOFF - KERNBASE)
#endif

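/*
 * Translation between pseudo-physical frame numbers (PFNs) and machine
 * frame numbers (MFNs): machine_to_phys_mapping is the M2P table provided
 * by the hypervisor, xpmap_phys_to_machine_mapping is the per-domain P2M
 * table.
 */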
#define mfn_to_pfn(mfn) (machine_to_phys_mapping[(mfn)])
#define pfn_to_mfn(pfn) (xpmap_phys_to_machine_mapping[(pfn)])

static __inline paddr_t
xpmap_mtop_masked(paddr_t mpa)
{
	return (
	    ((paddr_t)machine_to_phys_mapping[mpa >> PAGE_SHIFT] << PAGE_SHIFT)
	    + XPMAP_OFFSET);
}

static __inline paddr_t
xpmap_mtop(paddr_t mpa)
{
	return (xpmap_mtop_masked(mpa) | (mpa & ~PG_FRAME));
}

static __inline paddr_t
xpmap_ptom_masked(paddr_t ppa)
{
	return (((paddr_t)xpmap_phys_to_machine_mapping[(ppa -
	    XPMAP_OFFSET) >> PAGE_SHIFT]) << PAGE_SHIFT);
}

static __inline paddr_t
xpmap_ptom(paddr_t ppa)
{
	return (xpmap_ptom_masked(ppa) | (ppa & ~PG_FRAME));
}
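
/*
 * The *_masked variants translate only the page-frame part of the address;
 * the unmasked variants additionally carry over the low bits (the offset
 * within the page, or PTE flags when translating a PTE value).  Illustrative
 * uses:
 *
 *	xpmap_ptom_masked(pa) | PG_V | PG_RW	// build a PTE by hand
 *	xpmap_ptom(pa)				// keep pa's low bits as-is
 */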

static inline void
MULTI_update_va_mapping(
	multicall_entry_t *mcl, vaddr_t va,
	pt_entry_t new_val, unsigned long flags)
{
	mcl->op = __HYPERVISOR_update_va_mapping;
	mcl->args[0] = va;
#if defined(__x86_64__)
	mcl->args[1] = new_val;
	mcl->args[2] = flags;
#else
	mcl->args[1] = (new_val & 0xffffffff);
#ifdef PAE
	mcl->args[2] = (new_val >> 32);
#else
	mcl->args[2] = 0;
#endif
	mcl->args[3] = flags;
#endif
}

static inline void
MULTI_update_va_mapping_otherdomain(
	multicall_entry_t *mcl, vaddr_t va,
	pt_entry_t new_val, unsigned long flags, domid_t domid)
{
	mcl->op = __HYPERVISOR_update_va_mapping_otherdomain;
	mcl->args[0] = va;
#if defined(__x86_64__)
	mcl->args[1] = new_val;
	mcl->args[2] = flags;
	mcl->args[3] = domid;
#else
	mcl->args[1] = (new_val & 0xffffffff);
#ifdef PAE
	mcl->args[2] = (new_val >> 32);
#else
	mcl->args[2] = 0;
#endif
	mcl->args[3] = flags;
	mcl->args[4] = domid;
#endif
}
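
/*
 * These helpers only fill in a multicall_entry_t; the batch is issued with
 * HYPERVISOR_multicall().  A rough sketch (array size and error handling
 * are illustrative only):
 *
 *	multicall_entry_t mcl[2];
 *	MULTI_update_va_mapping(&mcl[0], va0, pte0, UVMF_INVLPG);
 *	MULTI_update_va_mapping(&mcl[1], va1, pte1, UVMF_INVLPG);
 *	if (HYPERVISOR_multicall(mcl, 2) != 0)
 *		panic("multicall failed");
 *
 * MULTI_UVMFLAGS_INDEX/MULTI_UVMDOMID_INDEX below name the args[] slot
 * holding the flags/domid, so callers can patch them after an entry has
 * been filled in.
 */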
#if defined(__x86_64__)
#define MULTI_UVMFLAGS_INDEX 2
#define MULTI_UVMDOMID_INDEX 3
#else
#define MULTI_UVMFLAGS_INDEX 3
#define MULTI_UVMDOMID_INDEX 4
#endif

#if defined(__x86_64__)
void xen_set_user_pgd(paddr_t);
#endif

#endif /* _XEN_XENPMAP_H_ */