/*	$OpenBSD: xenreg.h,v 1.11 2022/01/09 05:42:58 jsg Exp $	*/

/*
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to
 * deal in the Software without restriction, including without limitation the
 * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
 * sell copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 * DEALINGS IN THE SOFTWARE.
 *
 * Copyright (c) 2004,2005,2006,2007, Keir Fraser <keir@xensource.com>
 */

#ifndef _DEV_PV_XENREG_H_
#define _DEV_PV_XENREG_H_

/*
 * Hypercall interface defines
 */

#if defined(__amd64__)
# define HYPERCALL_ARG1(_i1)	ulong _a1 = (ulong)(_i1)
# define HYPERCALL_ARG2(_i2)	ulong _a2 = (ulong)(_i2)
# define HYPERCALL_ARG3(_i3)	ulong _a3 = (ulong)(_i3)
# define HYPERCALL_ARG4(_i4)	register ulong _a4 __asm__("r10") = (ulong)(_i4)
# define HYPERCALL_ARG5(_i5)	register ulong _a5 __asm__("r8") = (ulong)(_i5)
# define HYPERCALL_RES1		ulong _r1
# define HYPERCALL_RES2		ulong _r2
# define HYPERCALL_RES3		ulong _r3
# define HYPERCALL_RES4		ulong _r4
# define HYPERCALL_RES5		/* empty */
# define HYPERCALL_RES6		/* empty */
# define HYPERCALL_RET(_rv)	(_rv) = _r1
# define HYPERCALL_LABEL	"call *%[hcall]"
# define HYPERCALL_PTR(_ptr)	[hcall] "a" (_ptr)
# define HYPERCALL_OUT1		"=a" (_r1)
# define HYPERCALL_OUT2		, "=D" (_r2)
# define HYPERCALL_OUT3		, "=S" (_r3)
# define HYPERCALL_OUT4		, "=d" (_r4)
# define HYPERCALL_OUT5		, "+r" (_a4)
# define HYPERCALL_OUT6		, "+r" (_a5)
# define HYPERCALL_IN1		"1" (_a1)
# define HYPERCALL_IN2		, "2" (_a2)
# define HYPERCALL_IN3		, "3" (_a3)
# define HYPERCALL_IN4		/* empty */
# define HYPERCALL_IN5		/* empty */
# define HYPERCALL_CLOBBER	"memory"
#elif defined(__i386__)
# define HYPERCALL_ARG1(_i1)	ulong _a1 = (ulong)(_i1)
# define HYPERCALL_ARG2(_i2)	ulong _a2 = (ulong)(_i2)
# define HYPERCALL_ARG3(_i3)	ulong _a3 = (ulong)(_i3)
# define HYPERCALL_ARG4(_i4)	ulong _a4 = (ulong)(_i4)
# define HYPERCALL_ARG5(_i5)	ulong _a5 = (ulong)(_i5)
# define HYPERCALL_RES1		ulong _r1
# define HYPERCALL_RES2		ulong _r2
# define HYPERCALL_RES3		ulong _r3
# define HYPERCALL_RES4		ulong _r4
# define HYPERCALL_RES5		ulong _r5
# define HYPERCALL_RES6		ulong _r6
# define HYPERCALL_RET(_rv)	(_rv) = _r1
# define HYPERCALL_LABEL	"call *%[hcall]"
# define HYPERCALL_PTR(_ptr)	[hcall] "a" (_ptr)
# define HYPERCALL_OUT1		"=a" (_r1)
# define HYPERCALL_OUT2		, "=b" (_r2)
# define HYPERCALL_OUT3		, "=c" (_r3)
# define HYPERCALL_OUT4		, "=d" (_r4)
# define HYPERCALL_OUT5		, "=S" (_r5)
# define HYPERCALL_OUT6		, "=D" (_r6)
# define HYPERCALL_IN1		"1" (_a1)
# define HYPERCALL_IN2		, "2" (_a2)
# define HYPERCALL_IN3		, "3" (_a3)
# define HYPERCALL_IN4		, "4" (_a4)
# define HYPERCALL_IN5		, "5" (_a5)
# define HYPERCALL_CLOBBER	"memory"
#else
# error "Not implemented"
#endif
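
/*
 * Illustrative sketch (not part of the Xen ABI): the HYPERCALL_* macros
 * above are meant to compose into an inline-assembly hypercall stub; the
 * real dispatcher lives in the driver proper.  A hypothetical two-argument
 * wrapper, where "hcall" points at the entry for one operation within the
 * guest's hypercall page, could look like this:
 *
 *	static inline long
 *	xen_hypercall2_sketch(void *hcall, ulong i1, ulong i2)
 *	{
 *		HYPERCALL_RES1; HYPERCALL_RES2; HYPERCALL_RES3;
 *		HYPERCALL_ARG1(i1);
 *		HYPERCALL_ARG2(i2);
 *		long rv;
 *
 *		__asm__ volatile (
 *			  HYPERCALL_LABEL
 *			: HYPERCALL_OUT1 HYPERCALL_OUT2 HYPERCALL_OUT3
 *			: HYPERCALL_IN1 HYPERCALL_IN2
 *			, HYPERCALL_PTR(hcall)
 *			: HYPERCALL_CLOBBER);
 *		HYPERCALL_RET(rv);
 *		return (rv);
 *	}
 */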

/* Hypercall not implemented */
#define ENOXENSYS		38


#if defined(__i386__) || defined(__amd64__)
struct arch_vcpu_info {
	unsigned long cr2;
	unsigned long pad;
} __packed;

typedef unsigned long xen_pfn_t;
typedef unsigned long xen_ulong_t;

/* Maximum number of virtual CPUs in legacy multi-processor guests. */
#define XEN_LEGACY_MAX_VCPUS 32

struct arch_shared_info {
	unsigned long max_pfn;	/* max pfn that appears in table */
	/*
	 * Frame containing list of mfns containing list of mfns containing p2m.
	 */
	xen_pfn_t pfn_to_mfn_frame_list;
	unsigned long nmi_reason;
	uint64_t pad[32];
} __packed;
#else
#error "Not implemented"
#endif	/* __i386__ || __amd64__ */

/*
 * interface/xen.h
 */

typedef uint16_t domid_t;

/* DOMID_SELF is used in certain contexts to refer to oneself. */
#define DOMID_SELF		(0x7FF0U)

/*
 * Event channel endpoints per domain:
 *  1024 if a long is 32 bits; 4096 if a long is 64 bits.
 */
#define NR_EVENT_CHANNELS (sizeof(unsigned long) * sizeof(unsigned long) * 64)

struct vcpu_time_info {
	/*
	 * Updates to the following values are preceded and followed by an
	 * increment of 'version'. The guest can therefore detect updates by
	 * looking for changes to 'version'. If the least-significant bit of
	 * the version number is set then an update is in progress and the
	 * guest must wait to read a consistent set of values.
	 *
	 * The correct way to interact with the version number is similar to
	 * Linux's seqlock: see the implementations of read_seqbegin and
	 * read_seqretry.
	 */
	uint32_t version;
	uint32_t pad0;
	uint64_t tsc_timestamp;	/* TSC at last update of time vals.  */
	uint64_t system_time;	/* Time, in nanosecs, since boot.    */
	/*
	 * Current system time:
	 *   system_time +
	 *   ((((tsc - tsc_timestamp) << tsc_shift) * tsc_to_system_mul) >> 32)
	 * CPU frequency (Hz):
	 *   ((10^9 << 32) / tsc_to_system_mul) >> tsc_shift
	 */
	uint32_t tsc_to_system_mul;
	int8_t tsc_shift;
	int8_t pad1[3];
} __packed; /* 32 bytes */
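
/*
 * Illustrative sketch (hypothetical helper, not part of this header):
 * reading a consistent snapshot under the version seqlock and applying the
 * scaling formula above.  rdtsc() is assumed available; the 64-bit multiply
 * approximates the wider product a production implementation would use, and
 * compiler barriers stand in for the appropriate fences.
 *
 *	static inline uint64_t
 *	xen_system_time_sketch(volatile struct vcpu_time_info *ti)
 *	{
 *		uint32_t version;
 *		uint64_t delta, ns;
 *
 *		do {
 *			version = ti->version;
 *			__asm__ volatile ("" ::: "memory");
 *			delta = rdtsc() - ti->tsc_timestamp;
 *			if (ti->tsc_shift >= 0)
 *				delta <<= ti->tsc_shift;
 *			else
 *				delta >>= -ti->tsc_shift;
 *			ns = ti->system_time +
 *			    ((delta * ti->tsc_to_system_mul) >> 32);
 *			__asm__ volatile ("" ::: "memory");
 *		} while ((version & 1) != 0 || version != ti->version);
 *
 *		return (ns);
 *	}
 */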

struct vcpu_info {
	/*
	 * 'evtchn_upcall_pending' is written non-zero by Xen to indicate
	 * a pending notification for a particular VCPU. It is then cleared
	 * by the guest OS /before/ checking for pending work, thus avoiding
	 * a set-and-check race. Note that the mask is only accessed by Xen
	 * on the CPU that is currently hosting the VCPU. This means that the
	 * pending and mask flags can be updated by the guest without special
	 * synchronisation (i.e., no need for the x86 LOCK prefix).
	 * This may seem suboptimal because if the pending flag is set by
	 * a different CPU then an IPI may be scheduled even when the mask
	 * is set. However, note:
	 *  1. The task of 'interrupt holdoff' is covered by the per-event-
	 *     channel mask bits. A 'noisy' event that is continually being
	 *     triggered can be masked at source at this very precise
	 *     granularity.
	 *  2. The main purpose of the per-VCPU mask is therefore to restrict
	 *     reentrant execution: whether for concurrency control, or to
	 *     prevent unbounded stack usage. Whatever the purpose, we expect
	 *     that the mask will be asserted only for short periods at a time,
	 *     and so the likelihood of a 'spurious' IPI is suitably small.
	 * The mask is read before making an event upcall to the guest: a
	 * non-zero mask therefore guarantees that the VCPU will not receive
	 * an upcall activation. The mask is cleared when the VCPU requests
	 * to block: this avoids wakeup-waiting races.
	 */
	uint8_t evtchn_upcall_pending;
	uint8_t pad1[3];
	uint8_t evtchn_upcall_mask;
	uint8_t pad2[3];
	unsigned long evtchn_pending_sel;
	struct arch_vcpu_info arch;
	struct vcpu_time_info time;
} __packed; /* 64 bytes (x86) */

/*
 * Xen/kernel shared data -- pointer provided in start_info.
 *
 * This structure is defined to be both smaller than a page, and the only data
 * on the shared page, but may vary in actual size even within compatible Xen
 * versions; guests should not rely on the size of this structure remaining
 * constant.
 */
struct shared_info {
	struct vcpu_info vcpu_info[XEN_LEGACY_MAX_VCPUS];

	/*
	 * A domain can create "event channels" on which it can send and
	 * receive asynchronous event notifications. There are three classes
	 * of event that are delivered by this mechanism:
	 *  1. Bi-directional inter- and intra-domain connections.  Domains
	 *     must arrange out-of-band to set up a connection (usually by
	 *     allocating an unbound 'listener' port and advertising that via
	 *     a storage service such as xenstore).
	 *  2. Physical interrupts. A domain with suitable hardware-access
	 *     privileges can bind an event-channel port to a physical
	 *     interrupt source.
	 *  3. Virtual interrupts ('events'). A domain can bind an event
	 *     channel port to a virtual interrupt source, such as the
	 *     virtual-timer device or the emergency console.
	 *
	 * Event channels are addressed by a "port index". Each channel is
	 * associated with two bits of information:
	 *  1. PENDING -- notifies the domain that there is a pending
	 *     notification to be processed. This bit is cleared by the guest.
	 *  2. MASK -- if this bit is clear then a 0->1 transition of PENDING
	 *     will cause an asynchronous upcall to be scheduled. This bit is
	 *     only updated by the guest. It is read-only within Xen. If a
	 *     channel becomes pending while the channel is masked then the
	 *     'edge' is lost (i.e., when the channel is unmasked, the guest
	 *     must manually handle pending notifications as no upcall will be
	 *     scheduled by Xen).
	 *
	 * To expedite scanning of pending notifications, any 0->1 pending
	 * transition on an unmasked channel causes a corresponding bit in a
	 * per-vcpu selector word to be set. Each bit in the selector covers a
	 * 'C long' in the PENDING bitfield array.
	 */
	volatile unsigned long evtchn_pending[sizeof(unsigned long) * 8];
	volatile unsigned long evtchn_mask[sizeof(unsigned long) * 8];

	/*
	 * Wallclock time: updated only by control software. Guests should
	 * base their gettimeofday() syscall on this wallclock-base value.
	 */
	uint32_t wc_version;	/* Version counter: see vcpu_time_info_t. */
	uint32_t wc_sec;	/* Secs  00:00:00 UTC, Jan 1, 1970.  */
	uint32_t wc_nsec;	/* Nsecs 00:00:00 UTC, Jan 1, 1970.  */

	struct arch_shared_info arch;
} __packed;
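
/*
 * Illustrative sketch (hypothetical upcall scan, not part of this header):
 * walking the two-level pending bitmap described above.  "vci" is the
 * current CPU's vcpu_info slot and "s" the mapped shared_info page;
 * atomic_swap_ulong() is assumed to be an atomic exchange primitive and
 * __builtin_ctzl() counts trailing zero bits.
 *
 *	void
 *	xen_upcall_scan_sketch(struct shared_info *s, struct vcpu_info *vci)
 *	{
 *		unsigned long sel, pending;
 *		int row, bit, port;
 *
 *		vci->evtchn_upcall_pending = 0;
 *		while ((sel = atomic_swap_ulong(&vci->evtchn_pending_sel,
 *		    0)) != 0) {
 *			while (sel != 0) {
 *				row = __builtin_ctzl(sel);
 *				sel &= sel - 1;
 *				pending = s->evtchn_pending[row] &
 *				    ~s->evtchn_mask[row];
 *				while (pending != 0) {
 *					bit = __builtin_ctzl(pending);
 *					pending &= pending - 1;
 *					port = row * (int)(sizeof(long) * 8) +
 *					    bit;
 *					... dispatch the handler bound to
 *					    "port" and clear its PENDING bit ...
 *				}
 *			}
 *		}
 *	}
 */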


/*
 * interface/hvm/hvm_op.h
 */

/* Get/set subcommands: extra argument == pointer to xen_hvm_param struct. */
#define HVMOP_set_param		0
#define HVMOP_get_param		1
struct xen_hvm_param {
	domid_t  domid;		/* IN */
	uint32_t index;		/* IN */
	uint64_t value;		/* IN/OUT */
};
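
/*
 * Illustrative sketch: reading one parameter back with HVMOP_get_param.
 * The hvm_op hypercall takes the subcommand and a pointer to the structure
 * above; xen_hypercall2_sketch() and "hcall_hvm_op" are the hypothetical
 * stand-ins from the hypercall sketch earlier in this file.
 *
 *	struct xen_hvm_param xhp;
 *
 *	xhp.domid = DOMID_SELF;
 *	xhp.index = HVM_PARAM_STORE_EVTCHN;
 *	if (xen_hypercall2_sketch(hcall_hvm_op, HVMOP_get_param,
 *	    (ulong)&xhp) != 0)
 *		return;
 *	... on success, xhp.value holds the xenstore event-channel port ...
 */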

/*
 * Parameter space for HVMOP_{set,get}_param.
 */

/*
 * How should CPU0 event-channel notifications be delivered?
 * val[63:56] == 0: val[55:0] is a delivery GSI (Global System Interrupt).
 * val[63:56] == 1: val[55:0] is a delivery PCI INTx line, as follows:
 *                  Domain = val[47:32], Bus  = val[31:16],
 *                  DevFn  = val[15: 8], IntX = val[ 1: 0]
 * val[63:56] == 2: val[7:0] is a vector number, check for
 *                  XENFEAT_hvm_callback_vector to know if this delivery
 *                  method is available.
 * If val == 0 then CPU0 event-channel notifications are not delivered.
 */
#define HVM_PARAM_CALLBACK_IRQ			0

/*
 * These are not used by Xen. They are here for convenience of HVM-guest
 * xenbus implementations.
 */
#define HVM_PARAM_STORE_PFN			1
#define HVM_PARAM_STORE_EVTCHN			2

#define HVM_PARAM_PAE_ENABLED			4

#define HVM_PARAM_IOREQ_PFN			5

#define HVM_PARAM_BUFIOREQ_PFN			6
#define HVM_PARAM_BUFIOREQ_EVTCHN		26

/*
 * Set mode for virtual timers (currently x86 only):
 *  delay_for_missed_ticks (default):
 *   Do not advance a vcpu's time beyond the correct delivery time for
 *   interrupts that have been missed due to preemption. Deliver missed
 *   interrupts when the vcpu is rescheduled and advance the vcpu's virtual
 *   time stepwise for each one.
 *  no_delay_for_missed_ticks:
 *   As above, missed interrupts are delivered, but guest time always tracks
 *   wallclock (i.e., real) time while doing so.
 *  no_missed_ticks_pending:
 *   No missed interrupts are held pending. Instead, to ensure ticks are
 *   delivered at some non-zero rate, if we detect missed ticks then the
 *   internal tick alarm is not disabled if the VCPU is preempted during the
 *   next tick period.
 *  one_missed_tick_pending:
 *   Missed interrupts are collapsed together and delivered as one 'late tick'.
 *   Guest time always tracks wallclock (i.e., real) time.
 */
#define HVM_PARAM_TIMER_MODE			10
#define HVMPTM_delay_for_missed_ticks		 0
#define HVMPTM_no_delay_for_missed_ticks	 1
#define HVMPTM_no_missed_ticks_pending		 2
#define HVMPTM_one_missed_tick_pending		 3

/* Boolean: Enable virtual HPET (high-precision event timer)? (x86-only) */
#define HVM_PARAM_HPET_ENABLED			11

/* Identity-map page directory used by Intel EPT when CR0.PG=0. */
#define HVM_PARAM_IDENT_PT			12

/* Device Model domain, defaults to 0. */
#define HVM_PARAM_DM_DOMAIN			13

/* ACPI S state: S0 and S3 are currently supported on x86. */
#define HVM_PARAM_ACPI_S_STATE			14

/* TSS used on Intel when CR0.PE=0. */
#define HVM_PARAM_VM86_TSS			15

/* Boolean: Enable aligning all periodic vpts to reduce interrupts */
#define HVM_PARAM_VPT_ALIGN			16

/* Console debug shared memory ring and event channel */
#define HVM_PARAM_CONSOLE_PFN			17
#define HVM_PARAM_CONSOLE_EVTCHN		18

/*
 * Select location of ACPI PM1a and TMR control blocks. Currently two locations
 * are supported, specified by version 0 or 1 in this parameter:
 *   - 0: default, use the old addresses
 *        PM1A_EVT == 0x1f40; PM1A_CNT == 0x1f44; PM_TMR == 0x1f48
 *   - 1: use the new default qemu addresses
 *        PM1A_EVT == 0xb000; PM1A_CNT == 0xb004; PM_TMR == 0xb008
 * You can find these address definitions in <hvm/ioreq.h>
 */
#define HVM_PARAM_ACPI_IOPORTS_LOCATION		19

/* Enable blocking memory events, async or sync (pause vcpu until response);
 * onchangeonly indicates messages only on a change of value. */
#define HVM_PARAM_MEMORY_EVENT_CR0		20
#define HVM_PARAM_MEMORY_EVENT_CR3		21
#define HVM_PARAM_MEMORY_EVENT_CR4		22
#define HVM_PARAM_MEMORY_EVENT_INT3		23
#define HVM_PARAM_MEMORY_EVENT_SINGLE_STEP	25

#define HVMPME_MODE_MASK			(3 << 0)
#define HVMPME_mode_disabled			 0
#define HVMPME_mode_async			 1
#define HVMPME_mode_sync			 2
#define HVMPME_onchangeonly			(1 << 2)

/* Boolean: Enable nestedhvm (hvm only) */
#define HVM_PARAM_NESTEDHVM			24

/* Params for the mem event rings */
#define HVM_PARAM_PAGING_RING_PFN		27
#define HVM_PARAM_ACCESS_RING_PFN		28
#define HVM_PARAM_SHARING_RING_PFN		29

#define HVM_NR_PARAMS				30

/** The callback method types for Hypervisor event delivery to our domain. */
enum {
	HVM_CB_TYPE_GSI,
	HVM_CB_TYPE_PCI_INTX,
	HVM_CB_TYPE_VECTOR,
	HVM_CB_TYPE_MASK		= 0xFF,
	HVM_CB_TYPE_SHIFT		= 56
};

/** Format for specifying a GSI type callback. */
enum {
	HVM_CB_GSI_GSI_MASK		= 0xFFFFFFFF,
	HVM_CB_GSI_GSI_SHIFT		= 0
};
#define HVM_CALLBACK_GSI(gsi) \
	(((uint64_t)HVM_CB_TYPE_GSI << HVM_CB_TYPE_SHIFT) | \
	 ((gsi) & HVM_CB_GSI_GSI_MASK) << HVM_CB_GSI_GSI_SHIFT)

/** Format for specifying a virtual PCI interrupt line GSI style callback. */
enum {
	HVM_CB_PCI_INTX_INTPIN_MASK	= 0x3,
	HVM_CB_PCI_INTX_INTPIN_SHIFT	= 0,
	HVM_CB_PCI_INTX_SLOT_MASK	= 0x1F,
	HVM_CB_PCI_INTX_SLOT_SHIFT	= 11,
};
#define HVM_CALLBACK_PCI_INTX(slot, pin) \
	(((uint64_t)HVM_CB_TYPE_PCI_INTX << HVM_CB_TYPE_SHIFT) | \
	 (((slot) & HVM_CB_PCI_INTX_SLOT_MASK) << HVM_CB_PCI_INTX_SLOT_SHIFT) | \
	 (((pin) & HVM_CB_PCI_INTX_INTPIN_MASK) << HVM_CB_PCI_INTX_INTPIN_SHIFT))

/** Format for specifying a direct IDT vector injection style callback. */
enum {
	HVM_CB_VECTOR_VECTOR_MASK	= 0xFFFFFFFF,
	HVM_CB_VECTOR_VECTOR_SHIFT	= 0
};
#define HVM_CALLBACK_VECTOR(vector) \
	(((uint64_t)HVM_CB_TYPE_VECTOR << HVM_CB_TYPE_SHIFT) | \
	 (((vector) & HVM_CB_VECTOR_VECTOR_MASK) << HVM_CB_VECTOR_VECTOR_SHIFT))
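
/*
 * Illustrative sketch: a guest that has confirmed XENFEAT_hvm_callback_vector
 * can combine the macros above with HVM_PARAM_CALLBACK_IRQ to route event
 * upcalls to an IDT vector of its choosing (0x70 is an arbitrary example;
 * the wrapper and "hcall_hvm_op" are the hypothetical stand-ins sketched
 * earlier):
 *
 *	struct xen_hvm_param xhp;
 *
 *	xhp.domid = DOMID_SELF;
 *	xhp.index = HVM_PARAM_CALLBACK_IRQ;
 *	xhp.value = HVM_CALLBACK_VECTOR(0x70);
 *	error = xen_hypercall2_sketch(hcall_hvm_op, HVMOP_set_param,
 *	    (ulong)&xhp);
 */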


/*
 * interface/event_channel.h
 *
 * Event channels between domains.
 */

#define EVTCHNOP_bind_interdomain	0
#define EVTCHNOP_bind_virq		1
#define EVTCHNOP_bind_pirq		2
#define EVTCHNOP_close			3
#define EVTCHNOP_send			4
#define EVTCHNOP_status			5
#define EVTCHNOP_alloc_unbound		6
#define EVTCHNOP_bind_ipi		7
#define EVTCHNOP_bind_vcpu		8
#define EVTCHNOP_unmask			9
#define EVTCHNOP_reset			10

typedef uint32_t evtchn_port_t;

/*
 * EVTCHNOP_alloc_unbound: Allocate a port in domain <dom> and mark as
 * accepting interdomain bindings from domain <remote_dom>. A fresh port
 * is allocated in <dom> and returned as <port>.
 * NOTES:
 *  1. If the caller is unprivileged then <dom> must be DOMID_SELF.
 *  2. <remote_dom> may be DOMID_SELF, allowing loopback connections.
 */
struct evtchn_alloc_unbound {
	/* IN parameters */
	domid_t dom, remote_dom;
	/* OUT parameters */
	evtchn_port_t port;
};
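
/*
 * Illustrative sketch: allocating an unbound port for a loopback connection
 * (both ends in the calling domain).  EVTCHNOP_alloc_unbound and a pointer
 * to the structure are handed to the event_channel_op hypercall; the wrapper
 * and "hcall_evtchn_op" are hypothetical stand-ins as before.
 *
 *	struct evtchn_alloc_unbound eau;
 *
 *	eau.dom = DOMID_SELF;
 *	eau.remote_dom = DOMID_SELF;
 *	if (xen_hypercall2_sketch(hcall_evtchn_op, EVTCHNOP_alloc_unbound,
 *	    (ulong)&eau) != 0)
 *		return;
 *	... eau.port now names the freshly allocated port ...
 */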

/*
 * EVTCHNOP_close: Close a local event channel <port>. If the channel is
 * interdomain then the remote end is placed in the unbound state
 * (EVTCHNSTAT_unbound), awaiting a new connection.
 */
struct evtchn_close {
	/* IN parameters. */
	evtchn_port_t port;
};

/*
 * EVTCHNOP_send: Send an event to the remote end of the channel whose local
 * endpoint is <port>.
 */
struct evtchn_send {
	/* IN parameters. */
	evtchn_port_t port;
};

/*
 * EVTCHNOP_status: Get the current status of the communication channel which
 * has an endpoint at <dom, port>.
 * NOTES:
 *  1. <dom> may be specified as DOMID_SELF.
 *  2. Only a sufficiently-privileged domain may obtain the status of an event
 *     channel for which <dom> is not DOMID_SELF.
 */
struct evtchn_status {
	/* IN parameters */
	domid_t  dom;
	evtchn_port_t port;
	/* OUT parameters */
#define EVTCHNSTAT_closed	0  /* Channel is not in use.                 */
#define EVTCHNSTAT_unbound	1  /* Channel is waiting interdom connection.*/
#define EVTCHNSTAT_interdomain	2  /* Channel is connected to remote domain. */
#define EVTCHNSTAT_pirq		3  /* Channel is bound to a phys IRQ line.   */
#define EVTCHNSTAT_virq		4  /* Channel is bound to a virtual IRQ line */
#define EVTCHNSTAT_ipi		5  /* Channel is bound to a virtual IPI line */
	uint32_t status;
	uint32_t vcpu;		   /* VCPU to which this channel is bound.   */
	union {
		struct {
			domid_t dom;
		} unbound;	   /* EVTCHNSTAT_unbound */
		struct {
			domid_t dom;
			evtchn_port_t port;
		} interdomain;	   /* EVTCHNSTAT_interdomain */
		uint32_t pirq;	   /* EVTCHNSTAT_pirq */
		uint32_t virq;	   /* EVTCHNSTAT_virq */
	} u;
};

/*
 * EVTCHNOP_bind_vcpu: Specify which vcpu a channel should notify when an
 * event is pending.
 * NOTES:
 *  1. IPI-bound channels always notify the vcpu specified at bind time.
 *     This binding cannot be changed.
 *  2. Per-VCPU VIRQ channels always notify the vcpu specified at bind time.
 *     This binding cannot be changed.
 *  3. All other channels notify vcpu0 by default. This default is set when
 *     the channel is allocated (a port that is freed and subsequently reused
 *     has its binding reset to vcpu0).
 */
struct evtchn_bind_vcpu {
	/* IN parameters. */
	evtchn_port_t port;
	uint32_t vcpu;
};

/*
 * EVTCHNOP_unmask: Unmask the specified local event-channel port and deliver
 * a notification to the appropriate VCPU if an event is pending.
 */
struct evtchn_unmask {
	/* IN parameters. */
	evtchn_port_t port;
};

/*
 * Superseded by the new event_channel_op() hypercall since 0x00030202.
 */
struct evtchn_op {
	uint32_t cmd;		/* EVTCHNOP_* */
	union {
		struct evtchn_alloc_unbound alloc_unbound;
		struct evtchn_close close;
		struct evtchn_send send;
		struct evtchn_status status;
		struct evtchn_bind_vcpu bind_vcpu;
		struct evtchn_unmask unmask;
	} u;
};

/*
 * interface/features.h
 *
 * Feature flags, reported by XENVER_get_features.
 */

/*
 * If set, the guest does not need to write-protect its pagetables, and can
 * update them via direct writes.
 */
#define XENFEAT_writable_page_tables		0
/*
 * If set, the guest does not need to write-protect its segment descriptor
 * tables, and can update them via direct writes.
 */
#define XENFEAT_writable_descriptor_tables	1
/*
 * If set, translations between the guest's 'pseudo-physical' address space
 * and the host's machine address space are handled by the hypervisor. In this
 * mode the guest does not need to perform phys-to/from-machine translations
 * when performing page table operations.
 */
#define XENFEAT_auto_translated_physmap		2
/* If set, the guest is running in supervisor mode (e.g., x86 ring 0). */
#define XENFEAT_supervisor_mode_kernel		3
/*
 * If set, the guest does not need to allocate x86 PAE page directories
 * below 4GB. This flag is usually implied by auto_translated_physmap.
 */
#define XENFEAT_pae_pgdir_above_4gb		4
/* x86: Does this Xen host support the MMU_PT_UPDATE_PRESERVE_AD hypercall? */
#define XENFEAT_mmu_pt_update_preserve_ad	5
/* x86: Does this Xen host support the MMU_{CLEAR,COPY}_PAGE hypercall? */
#define XENFEAT_highmem_assist			6
/*
 * If set, GNTTABOP_map_grant_ref honors flags to be placed into guest kernel
 * available pte bits.
 */
#define XENFEAT_gnttab_map_avail_bits		7
/* x86: Does this Xen host support the HVM callback vector type? */
#define XENFEAT_hvm_callback_vector		8
/* x86: pvclock algorithm is safe to use on HVM */
#define XENFEAT_hvm_safe_pvclock		9
/* x86: pirq can be used by HVM guests */
#define XENFEAT_hvm_pirqs			10
/* operation as Dom0 is supported */
#define XENFEAT_dom0				11


/*
 * interface/grant_table.h
 */

/*
 * Reference to a grant entry in a specified domain's grant table.
 */
typedef uint32_t grant_ref_t;

/*
 * The first few grant table entries will be preserved across grant table
 * version changes and may be pre-populated at domain creation by tools.
 */
#define GNTTAB_NR_RESERVED_ENTRIES		8

/*
 * Type of grant entry.
 *  GTF_invalid: This grant entry grants no privileges.
 *  GTF_permit_access: Allow @domid to map/access @frame.
 *  GTF_accept_transfer: Allow @domid to transfer ownership of one page frame
 *                       to this guest. Xen writes the page number to @frame.
 *  GTF_transitive: Allow @domid to transitively access a subrange of
 *                  @trans_grant in @trans_domid.  No mappings are allowed.
 */
#define GTF_invalid				(0<<0)
#define GTF_permit_access			(1<<0)
#define GTF_accept_transfer			(2<<0)
#define GTF_transitive				(3<<0)
#define GTF_type_mask				(3<<0)

/*
 * Subflags for GTF_permit_access.
 *  GTF_readonly: Restrict @domid to read-only mappings and accesses. [GST]
 *  GTF_reading: Grant entry is currently mapped for reading by @domid. [XEN]
 *  GTF_writing: Grant entry is currently mapped for writing by @domid. [XEN]
 *  GTF_PAT, GTF_PWT, GTF_PCD: (x86) cache attribute flags for the grant [GST]
 */
#define GTF_readonly				(1<<2)
#define GTF_reading				(1<<3)
#define GTF_writing				(1<<4)
#define GTF_PWT					(1<<5)
#define GTF_PCD					(1<<6)
#define GTF_PAT					(1<<7)

typedef struct grant_entry {
	uint16_t flags;
	domid_t domid;
	uint32_t frame;
} __packed grant_entry_t;

/* Number of grant table entries per memory page */
#define GNTTAB_NEPG			(PAGE_SIZE / sizeof(grant_entry_t))
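
/*
 * Illustrative sketch (hypothetical helper): granting another domain
 * read-only access to one of our frames through a version 1 grant entry.
 * "gnttab" is the guest's mapped grant table.  The flags word must be
 * written last so the remote end never observes a half-built entry;
 * membar_producer() is assumed to provide the needed store ordering.
 *
 *	void
 *	gnttab_grant_sketch(grant_entry_t *gnttab, grant_ref_t ref,
 *	    domid_t dom, uint32_t gfn)
 *	{
 *		gnttab[ref].frame = gfn;
 *		gnttab[ref].domid = dom;
 *		membar_producer();
 *		gnttab[ref].flags = GTF_permit_access | GTF_readonly;
 *	}
 */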

#define GNTTABOP_query_size			6
#define GNTTABOP_set_version			8
#define GNTTABOP_get_version			10

/*
 * GNTTABOP_query_size: Query the current and maximum sizes of the shared
 * grant table.
 * NOTES:
 *  1. <dom> may be specified as DOMID_SELF.
 *  2. Only a sufficiently-privileged domain may specify <dom> != DOMID_SELF.
 */
struct gnttab_query_size {
	/* IN parameters. */
	domid_t dom;
	/* OUT parameters. */
	uint32_t nr_frames;
	uint32_t max_nr_frames;
	int16_t status;		/* => enum grant_status */
};

/*
 * GNTTABOP_set_version: Request a particular version of the grant
 * table shared table structure.  This operation can only be performed
 * once in any given domain.  It must be performed before any grants
 * are activated; otherwise, the domain will be stuck with version 1.
 * The only defined versions are 1 and 2.
 */
struct gnttab_set_version {
	/* IN/OUT parameters */
	uint32_t version;
};

/*
 * GNTTABOP_get_version: Get the grant table version which is in
 * effect for domain <dom>.
 */
struct gnttab_get_version {
	/* IN parameters */
	domid_t dom;
	uint16_t pad;
	/* OUT parameters */
	uint32_t version;
};


/*
 * interface/memory.h
 *
 * Memory reservation and information.
 */

/*
 * Increase or decrease the specified domain's memory reservation.
 * Returns the number of extents successfully allocated or freed.
 * arg == addr of struct xen_memory_reservation.
 */
#define XENMEM_increase_reservation	0
#define XENMEM_decrease_reservation	1
#define XENMEM_populate_physmap		6

#define XENMAPSPACE_shared_info		0	/* shared info page */
#define XENMAPSPACE_grant_table		1	/* grant table page */
#define XENMAPSPACE_gmfn		2	/* GMFN */
#define XENMAPSPACE_gmfn_range		3	/* GMFN range */
#define XENMAPSPACE_gmfn_foreign	4	/* GMFN from another domain */

/*
 * Sets the GPFN at which a particular page appears in the specified guest's
 * pseudophysical address space.
 * arg == addr of xen_add_to_physmap_t.
 */
#define XENMEM_add_to_physmap		7
struct xen_add_to_physmap {
	/* Which domain to change the mapping for. */
	domid_t domid;

	/* Number of pages to go through for gmfn_range */
	uint16_t size;

	/* Source mapping space (XENMAPSPACE_* above). */
	unsigned int space;

#define XENMAPIDX_grant_table_status 0x80000000

	/* Index into source mapping space. */
	xen_ulong_t idx;

	/* GPFN where the source mapping page should appear. */
	xen_pfn_t gpfn;
};
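
/*
 * Illustrative sketch: asking Xen to materialize the shared info page at a
 * guest-chosen frame.  "pa" is the physical address of a page the guest has
 * reserved; XENMEM_add_to_physmap and a pointer to the structure go to the
 * memory_op hypercall ("hcall_memory_op" is a hypothetical stand-in).
 *
 *	struct xen_add_to_physmap xatp;
 *
 *	xatp.domid = DOMID_SELF;
 *	xatp.size = 0;
 *	xatp.space = XENMAPSPACE_shared_info;
 *	xatp.idx = 0;
 *	xatp.gpfn = pa >> PAGE_SHIFT;
 *	error = xen_hypercall2_sketch(hcall_memory_op, XENMEM_add_to_physmap,
 *	    (ulong)&xatp);
 */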

/*
 * interface/version.h
 *
 * Xen version, type, and compile information.
 */

/* arg == NULL; returns major:minor (16:16). */
#define XENVER_version		0
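
/*
 * Illustrative sketch: XENVER_version packs the version into one word, so
 * with a hypothetical wrapper ("hcall_version_op") for the version hypercall:
 *
 *	ulong ver;
 *
 *	ver = xen_hypercall2_sketch(hcall_version_op, XENVER_version,
 *	    (ulong)NULL);
 *	printf("Xen %lu.%lu\n", ver >> 16, ver & 0xffff);
 */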

/* arg == 16-byte buffer. */
#define XENVER_extraversion	1

/* arg == xen_compile_info. */
#define XENVER_compile_info	2
struct xen_compile_info {
	char compiler[64];
	char compile_by[16];
	char compile_domain[32];
	char compile_date[32];
};

#define XENVER_get_features	6
struct xen_feature_info {
	unsigned int submap_idx;	/* IN: which 32-bit submap to return */
	uint32_t submap;		/* OUT: 32-bit submap */
};
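
/*
 * Illustrative sketch: probing one feature bit.  XENFEAT_* values below 32
 * live in submap 0; XENVER_get_features and a pointer to the structure go
 * to the version hypercall (hypothetical wrapper as before).
 *
 *	struct xen_feature_info xfi;
 *
 *	xfi.submap_idx = 0;
 *	if (xen_hypercall2_sketch(hcall_version_op, XENVER_get_features,
 *	    (ulong)&xfi) == 0 &&
 *	    (xfi.submap & (1 << XENFEAT_hvm_callback_vector)))
 *		... vector callbacks are available ...
 */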

/* arg == NULL; returns host memory page size. */
#define XENVER_pagesize		7

/* arg == xen_domain_handle_t. */
#define XENVER_guest_handle	8

#define XENVER_commandline	9
typedef char xen_commandline_t[1024];

#endif /* _DEV_PV_XENREG_H_ */