1/*	$NetBSD: xenfunc.c,v 1.29 2022/08/20 23:48:51 riastradh Exp $	*/
2
3/*
4 * Copyright (c) 2004 Christian Limpach.
5 * All rights reserved.
6 *
7 * Redistribution and use in source and binary forms, with or without
8 * modification, are permitted provided that the following conditions
9 * are met:
10 * 1. Redistributions of source code must retain the above copyright
11 *    notice, this list of conditions and the following disclaimer.
12 * 2. Redistributions in binary form must reproduce the above copyright
13 *    notice, this list of conditions and the following disclaimer in the
14 *    documentation and/or other materials provided with the distribution.
15 *
16 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
17 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
18 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
19 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
20 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
21 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
22 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
23 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
24 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
25 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
26 */
27
28#include <sys/cdefs.h>
29__KERNEL_RCSID(0, "$NetBSD: xenfunc.c,v 1.29 2022/08/20 23:48:51 riastradh Exp $");
30
31#include <sys/param.h>
32
33#include <uvm/uvm_extern.h>
34
35#include <machine/intr.h>
36#include <machine/vmparam.h>
37#include <machine/pmap.h>
38#include <machine/pmap_private.h>
39#include <xen/xen.h>
40#include <xen/hypervisor.h>
41//#include <xen/evtchn.h>
42#include <xen/xenpmap.h>
43#include <machine/pte.h>
44
45#define MAX_XEN_IDT 128
46
47void xen_set_ldt(vaddr_t, uint32_t);
48
49void
50invlpg(vaddr_t addr)
51{
52	int s = splvm(); /* XXXSMP */
53	xpq_queue_invlpg(addr);
54	splx(s);
55}
56
/*
 * lidt: register the interrupt descriptor table described by rd with
 * the hypervisor.  A Xen PV guest cannot execute the real lidt
 * instruction; the table (already in struct trap_info format) is
 * compacted into a private page and handed to
 * HYPERVISOR_set_trap_table() instead.
 */
void
lidt(struct region_descriptor *rd)
{
	/*
	 * We need to do this because we can't assume kmem_alloc(9)
	 * will be available at the boot stage when this is called.
	 */
	static char xen_idt_page[PAGE_SIZE] __attribute__((__aligned__ (PAGE_SIZE)));
#if defined(__x86_64__)
	/*
	 * NOTE(review): the staging page is a shared static; disabling
	 * preemption presumably keeps this LWP on the CPU while the
	 * page's protection is toggled below -- confirm against callers.
	 */
	kpreempt_disable();
#endif
	memset(xen_idt_page, 0, PAGE_SIZE);

	struct trap_info *xen_idt = (void * )xen_idt_page;
	int xen_idt_idx = 0;

	/* rd_base is taken to point at an array of struct trap_info;
	 * rd_limit gives its size in bytes. */
	struct trap_info * idd = (void *) rd->rd_base;
	const int nidt = rd->rd_limit / (sizeof *idd);

	int i;

	/*
	 * Sweep in all initialised entries, consolidate them back to
	 * back in the requestor array.
	 */
	for (i = 0; i < nidt; i++) {
		if (idd[i].address == 0) /* Skip gap */
			continue;
		KASSERT(xen_idt_idx < MAX_XEN_IDT);
		/* Copy over entry */
		xen_idt[xen_idt_idx++] = idd[i];
	}

#if defined(__x86_64__)
	/* page needs to be r/o before the hypervisor will accept it */
	pmap_changeprot_local((vaddr_t) xen_idt, VM_PROT_READ);
#endif /* __x86_64 */

	/* Hook it up in the hypervisor */
	if (HYPERVISOR_set_trap_table(xen_idt))
		panic("HYPERVISOR_set_trap_table() failed");

#if defined(__x86_64__)
	/* reset the staging page to r/w for the next call */
	pmap_changeprot_local((vaddr_t) xen_idt, VM_PROT_READ|VM_PROT_WRITE);
	kpreempt_enable();
#endif /* __x86_64 */
}
105
106void
107lldt(u_short sel)
108{
109#ifndef __x86_64__
110	struct cpu_info *ci;
111
112	ci = curcpu();
113
114	if (ci->ci_curldt == sel)
115		return;
116	if (sel == GSEL(GLDT_SEL, SEL_KPL))
117		xen_set_ldt((vaddr_t)ldtstore, NLDT);
118	else
119		xen_set_ldt(ci->ci_gdt[IDXSELN(sel)].ld.ld_base,
120		    ci->ci_gdt[IDXSELN(sel)].ld.ld_entries);
121	ci->ci_curldt = sel;
122#endif
123}
124
125void
126ltr(u_short sel)
127{
128	panic("XXX ltr not supported\n");
129}
130
131void
132lcr0(register_t val)
133{
134	panic("XXX lcr0 not supported\n");
135}
136
/*
 * rcr0: read %cr0.  A Xen PV guest has no direct access to %cr0, so
 * this stub always returns 0.
 */
register_t
rcr0(void)
{
	/* XXX: handle X86_CR0_TS ? */
	return 0;
}
143
#ifndef __x86_64__
/*
 * lcr3: switch to the page table whose physical address is val.
 * The machine address is looked up and the switch queued to the
 * hypervisor (i386 only).
 */
void
lcr3(register_t val)
{
	int spl;

	spl = splvm();
	xpq_queue_pt_switch(xpmap_ptom_masked(val));
	splx(spl);
}
#endif
153
/*
 * tlbflush: queue a full TLB flush to the hypervisor.
 */
void
tlbflush(void)
{
	int spl;

	spl = splvm();
	xpq_queue_tlb_flush();
	splx(spl);
}
161
/*
 * tlbflushg: flush the TLB including global-page entries.  The full
 * flush queued by tlbflush() is assumed to cover global pages too
 * under Xen, so this simply delegates.
 */
void
tlbflushg(void)
{
	tlbflush();
}
167
/*
 * rdr0: read debug register %dr0.  PV guests cannot touch the debug
 * registers directly, so all accesses go through the hypervisor.
 */
register_t
rdr0(void)
{

	return HYPERVISOR_get_debugreg(0);
}
174
/* ldr0: load val into debug register %dr0 via the hypervisor. */
void
ldr0(register_t val)
{

	HYPERVISOR_set_debugreg(0, val);
}
181
/* rdr1: read debug register %dr1 via the hypervisor. */
register_t
rdr1(void)
{

	return HYPERVISOR_get_debugreg(1);
}
188
/* ldr1: load val into debug register %dr1 via the hypervisor. */
void
ldr1(register_t val)
{

	HYPERVISOR_set_debugreg(1, val);
}
195
/* rdr2: read debug register %dr2 via the hypervisor. */
register_t
rdr2(void)
{

	return HYPERVISOR_get_debugreg(2);
}
202
/* ldr2: load val into debug register %dr2 via the hypervisor. */
void
ldr2(register_t val)
{

	HYPERVISOR_set_debugreg(2, val);
}
209
/* rdr3: read debug register %dr3 via the hypervisor. */
register_t
rdr3(void)
{

	return HYPERVISOR_get_debugreg(3);
}
216
/* ldr3: load val into debug register %dr3 via the hypervisor. */
void
ldr3(register_t val)
{

	HYPERVISOR_set_debugreg(3, val);
}
/* rdr6: read debug status register %dr6 via the hypervisor. */
register_t
rdr6(void)
{

	return HYPERVISOR_get_debugreg(6);
}
229
/* ldr6: load val into debug status register %dr6 via the hypervisor. */
void
ldr6(register_t val)
{

	HYPERVISOR_set_debugreg(6, val);
}
236
/* rdr7: read debug control register %dr7 via the hypervisor. */
register_t
rdr7(void)
{

	return HYPERVISOR_get_debugreg(7);
}
243
/* ldr7: load val into debug control register %dr7 via the hypervisor. */
void
ldr7(register_t val)
{

	HYPERVISOR_set_debugreg(7, val);
}
250
/*
 * wbinvd: write back and invalidate the CPU caches.  The privileged
 * instruction is unavailable to a PV guest; the flush is requested
 * through the Xen pmap queue instead.
 */
void
wbinvd(void)
{

	xpq_flush_cache();
}
257
258register_t
259rcr2(void)
260{
261	return curcpu()->ci_vcpu->arch.cr2;
262}
263
264void
265lcr2(register_t v)
266{
267	curcpu()->ci_vcpu->arch.cr2 = v;
268}
269
#ifdef __x86_64__
/*
 * setusergs: set the user-mode %gs selector.  Under Xen PV this is
 * done with the set_segment_base hypercall rather than a direct
 * segment-register load (amd64 only).
 */
void
setusergs(int gssel)
{
	HYPERVISOR_set_segment_base(SEGBASE_GS_USER_SEL, gssel);
}
#endif
277