/*	$NetBSD: xenpmap.h,v 1.43 2020/04/25 15:26:17 bouyer Exp $	*/

/*
 *
 * Copyright (c) 2004 Christian Limpach.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */


#ifndef _XEN_XENPMAP_H_
#define _XEN_XENPMAP_H_

#ifdef _KERNEL_OPT
#include "opt_xen.h"
#endif

#include <sys/types.h>
#include <sys/kcpuset.h>

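/* Marks a phys-to-machine slot that has no machine frame assigned. */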
#define	INVALID_P2M_ENTRY	(~0UL)

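/*
 * The xpq_queue_* functions batch MMU operations (machine/physical
 * updates, PTE writes, TLB and single-page invalidations, page-table
 * switches, pins and unpins) and submit them to the hypervisor when
 * xpq_flush_queue() is called.
 */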
void xpq_queue_machphys_update(paddr_t, paddr_t);
void xpq_queue_invlpg(vaddr_t);
void xpq_queue_pte_update(paddr_t, pt_entry_t);
void xpq_queue_pt_switch(paddr_t);
void xpq_flush_queue(void);
void xpq_queue_set_ldt(vaddr_t, uint32_t);
void xpq_queue_tlb_flush(void);
void xpq_queue_pin_table(paddr_t, int);
void xpq_queue_unpin_table(paddr_t);
int  xpq_update_foreign(paddr_t, pt_entry_t, int);
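/* Cross-CPU TLB shootdowns and whole-page copy/zero done via hypercalls. */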
void xen_mcast_tlbflush(kcpuset_t *);
void xen_bcast_tlbflush(void);
void xen_mcast_invlpg(vaddr_t, kcpuset_t *);
void xen_bcast_invlpg(vaddr_t);
void xen_copy_page(paddr_t, paddr_t);
void xen_pagezero(paddr_t);

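/*
 * Suspend/resume hooks.  These are expected to be called by the Xen
 * save/restore path; the recursive-entry helpers tear down and rebuild
 * the pmap's self-referencing page-table entries, which apparently
 * cannot be carried across a save/restore cycle.
 */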
void pmap_xen_resume(void);
void pmap_xen_suspend(void);
void pmap_map_recursive_entries(void);
void pmap_unmap_recursive_entries(void);

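/*
 * Sync a kernel page-directory entry into the other page directories
 * (see the callers in pmap.c for the exact contract).
 */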
void xen_kpm_sync(struct pmap *, int);

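/*
 * Pin a page-table page at the given level so the hypervisor validates
 * it and accepts it as part of a page table.
 */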
#define xpq_queue_pin_l1_table(pa)	\
	xpq_queue_pin_table(pa, MMUEXT_PIN_L1_TABLE)
#define xpq_queue_pin_l2_table(pa)	\
	xpq_queue_pin_table(pa, MMUEXT_PIN_L2_TABLE)
#define xpq_queue_pin_l3_table(pa)	\
	xpq_queue_pin_table(pa, MMUEXT_PIN_L3_TABLE)
#define xpq_queue_pin_l4_table(pa)	\
	xpq_queue_pin_table(pa, MMUEXT_PIN_L4_TABLE)

#ifdef XENPV
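/*
 * PV translation between pseudo-physical and machine addresses.
 * xpmap_phys_to_machine_mapping is the domain's p2m array; the reverse
 * lookup goes through the hypervisor-provided machine_to_phys_mapping
 * table.  A PTE for a pseudo-physical page is typically built as
 * xpmap_ptom_masked(pa) | <PTE bits>.
 */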
extern unsigned long *xpmap_phys_to_machine_mapping;

static __inline paddr_t
xpmap_mtop_masked(paddr_t mpa)
{
	return (
	    (paddr_t)machine_to_phys_mapping[mpa >> PAGE_SHIFT] << PAGE_SHIFT);
}

static __inline paddr_t
xpmap_mtop(paddr_t mpa)
{
	return (xpmap_mtop_masked(mpa) | (mpa & ~PTE_4KFRAME));
}

static __inline paddr_t
xpmap_ptom_masked(paddr_t ppa)
{
	return (
	    (paddr_t)xpmap_phys_to_machine_mapping[ppa >> PAGE_SHIFT]
	    << PAGE_SHIFT);
}

static __inline paddr_t
xpmap_ptom(paddr_t ppa)
{
	return (xpmap_ptom_masked(ppa) | (ppa & ~PTE_4KFRAME));
}

static __inline void
xpmap_ptom_map(paddr_t ppa, paddr_t mpa)
{
	xpmap_phys_to_machine_mapping[ppa >> PAGE_SHIFT] = mpa >> PAGE_SHIFT;
}

static __inline void
xpmap_ptom_unmap(paddr_t ppa)
{
	xpmap_phys_to_machine_mapping[ppa >> PAGE_SHIFT] = INVALID_P2M_ENTRY;
}

static __inline bool
xpmap_ptom_isvalid(paddr_t ppa)
{
	return (
	    xpmap_phys_to_machine_mapping[ppa >> PAGE_SHIFT]
	    != INVALID_P2M_ENTRY);
}


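/*
 * Fill a multicall entry for an update_va_mapping hypercall.  On i386
 * (PAE) the 64-bit PTE is split across two 32-bit argument slots, which
 * is why MULTI_UVMFLAGS_INDEX/MULTI_UVMDOMID_INDEX differ per arch below.
 */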
static inline void
MULTI_update_va_mapping(
	multicall_entry_t *mcl, vaddr_t va,
	pt_entry_t new_val, unsigned long flags)
{
	mcl->op = __HYPERVISOR_update_va_mapping;
	mcl->args[0] = va;
#if defined(__x86_64__)
	mcl->args[1] = new_val;
	mcl->args[2] = flags;
#else
	mcl->args[1] = (new_val & 0xffffffff);
	mcl->args[2] = (new_val >> 32);
	mcl->args[3] = flags;
#endif
}

static inline void
MULTI_update_va_mapping_otherdomain(
	multicall_entry_t *mcl, vaddr_t va,
	pt_entry_t new_val, unsigned long flags, domid_t domid)
{
	mcl->op = __HYPERVISOR_update_va_mapping_otherdomain;
	mcl->args[0] = va;
#if defined(__x86_64__)
	mcl->args[1] = new_val;
	mcl->args[2] = flags;
	mcl->args[3] = domid;
#else
	mcl->args[1] = (new_val & 0xffffffff);
	mcl->args[2] = (new_val >> 32);
	mcl->args[3] = flags;
	mcl->args[4] = domid;
#endif
}
#if defined(__x86_64__)
#define MULTI_UVMFLAGS_INDEX 2
#define MULTI_UVMDOMID_INDEX 3
#else
#define MULTI_UVMFLAGS_INDEX 3
#define MULTI_UVMDOMID_INDEX 4
#endif

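/*
 * amd64 PV keeps separate kernel and user L4 page tables; this installs
 * the user one.
 */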
#if defined(__x86_64__)
void xen_set_user_pgd(paddr_t);
#endif
#endif /* XENPV */

#endif /* _XEN_XENPMAP_H_ */