/*
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to
 * deal in the Software without restriction, including without limitation the
 * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
 * sell copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 * DEALINGS IN THE SOFTWARE.
 *
 * Copyright (c) 2007, Keir Fraser
 */

#ifndef __XEN_PUBLIC_HVM_HVM_OP_H__
#define __XEN_PUBLIC_HVM_HVM_OP_H__

#include "../xen.h"
#include "../trace.h"
#include "../event_channel.h"

/* Get/set subcommands: extra argument == pointer to xen_hvm_param struct. */
#define HVMOP_set_param           0
#define HVMOP_get_param           1
struct xen_hvm_param {
    domid_t  domid;    /* IN */
    uint32_t index;    /* IN */
    uint64_t value;    /* IN/OUT */
};
typedef struct xen_hvm_param xen_hvm_param_t;
DEFINE_XEN_GUEST_HANDLE(xen_hvm_param_t);
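
/*
 * Illustrative sketch (not part of the ABI): a guest could read one of its
 * own parameters via the HYPERVISOR_hvm_op() hypercall wrapper that guest
 * OS headers conventionally provide (the wrapper name and the HVM_PARAM_*
 * index, which comes from hvm/params.h, are assumptions of this example):
 *
 *     struct xen_hvm_param p = {
 *         .domid = DOMID_SELF,
 *         .index = HVM_PARAM_STORE_PFN,
 *     };
 *     int rc = HYPERVISOR_hvm_op(HVMOP_get_param, &p);
 *     if ( rc == 0 )
 *         store_pfn = p.value;
 */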

/* Set the logical level of one of a domain's PCI INTx wires. */
#define HVMOP_set_pci_intx_level  2
struct xen_hvm_set_pci_intx_level {
    /* Domain to be updated. */
    domid_t  domid;
    /* PCI INTx identification in PCI topology (domain:bus:device:intx). */
    uint8_t  domain, bus, device, intx;
    /* Assertion level (0 = unasserted, 1 = asserted). */
    uint8_t  level;
};
typedef struct xen_hvm_set_pci_intx_level xen_hvm_set_pci_intx_level_t;
DEFINE_XEN_GUEST_HANDLE(xen_hvm_set_pci_intx_level_t);

/* Set the logical level of one of a domain's ISA IRQ wires. */
#define HVMOP_set_isa_irq_level   3
struct xen_hvm_set_isa_irq_level {
    /* Domain to be updated. */
    domid_t  domid;
    /* ISA device identification, by ISA IRQ (0-15). */
    uint8_t  isa_irq;
    /* Assertion level (0 = unasserted, 1 = asserted). */
    uint8_t  level;
};
typedef struct xen_hvm_set_isa_irq_level xen_hvm_set_isa_irq_level_t;
DEFINE_XEN_GUEST_HANDLE(xen_hvm_set_isa_irq_level_t);
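
/*
 * Illustrative sketch: a device model could pulse ISA IRQ 10 of domain
 * <dom> by asserting and then deasserting the wire (HYPERVISOR_hvm_op()
 * wrapper assumed as above; <dom> is hypothetical):
 *
 *     struct xen_hvm_set_isa_irq_level irq = {
 *         .domid = dom, .isa_irq = 10, .level = 1,
 *     };
 *     HYPERVISOR_hvm_op(HVMOP_set_isa_irq_level, &irq);
 *     irq.level = 0;
 *     HYPERVISOR_hvm_op(HVMOP_set_isa_irq_level, &irq);
 */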

#define HVMOP_set_pci_link_route  4
struct xen_hvm_set_pci_link_route {
    /* Domain to be updated. */
    domid_t  domid;
    /* PCI link identifier (0-3). */
    uint8_t  link;
    /* ISA IRQ (1-15), or 0 (disable link). */
    uint8_t  isa_irq;
};
typedef struct xen_hvm_set_pci_link_route xen_hvm_set_pci_link_route_t;
DEFINE_XEN_GUEST_HANDLE(xen_hvm_set_pci_link_route_t);

/* Flushes all VCPU TLBs: @arg must be NULL. */
#define HVMOP_flush_tlbs          5

typedef enum {
    HVMMEM_ram_rw,             /* Normal read/write guest RAM */
    HVMMEM_ram_ro,             /* Read-only; writes are discarded */
    HVMMEM_mmio_dm,            /* Reads and writes go to the device model */
    HVMMEM_mmio_write_dm       /* Read-only; writes go to the device model */
} hvmmem_type_t;

/* Following tools-only interfaces may change in future. */
#if defined(__XEN__) || defined(__XEN_TOOLS__)

/* Track dirty VRAM. */
#define HVMOP_track_dirty_vram    6
struct xen_hvm_track_dirty_vram {
    /* Domain to be tracked. */
    domid_t  domid;
    /* Number of pages to track. */
    uint32_t nr;
    /* First pfn to track. */
    uint64_aligned_t first_pfn;
    /* OUT variable. */
    /* Dirty bitmap buffer. */
    XEN_GUEST_HANDLE_64(uint8) dirty_bitmap;
};
typedef struct xen_hvm_track_dirty_vram xen_hvm_track_dirty_vram_t;
DEFINE_XEN_GUEST_HANDLE(xen_hvm_track_dirty_vram_t);
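
/*
 * Illustrative sketch: the dirty bitmap holds one bit per tracked page, so
 * the buffer behind <dirty_bitmap> must be at least (nr + 7) / 8 bytes.
 * Tracking 1024 pages of VRAM starting at <vram_pfn> (both <dom> and
 * <vram_pfn> are hypothetical) might look like:
 *
 *     uint8_t bitmap[1024 / 8];
 *     struct xen_hvm_track_dirty_vram t = {
 *         .domid = dom, .nr = 1024, .first_pfn = vram_pfn,
 *     };
 *     set_xen_guest_handle(t.dirty_bitmap, bitmap);
 *     HYPERVISOR_hvm_op(HVMOP_track_dirty_vram, &t);
 */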

/* Notify that some pages got modified by the Device Model. */
#define HVMOP_modified_memory    7
struct xen_hvm_modified_memory {
    /* Domain to be updated. */
    domid_t  domid;
    /* Number of pages. */
    uint32_t nr;
    /* First pfn. */
    uint64_aligned_t first_pfn;
};
typedef struct xen_hvm_modified_memory xen_hvm_modified_memory_t;
DEFINE_XEN_GUEST_HANDLE(xen_hvm_modified_memory_t);

#define HVMOP_set_mem_type    8
/* Notify that a region of memory is to be treated in a specific way. */
struct xen_hvm_set_mem_type {
    /* Domain to be updated. */
    domid_t domid;
    /* Memory type. */
    uint16_t hvmmem_type;
    /* Number of pages. */
    uint32_t nr;
    /* First pfn. */
    uint64_aligned_t first_pfn;
};
typedef struct xen_hvm_set_mem_type xen_hvm_set_mem_type_t;
DEFINE_XEN_GUEST_HANDLE(xen_hvm_set_mem_type_t);
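
/*
 * Illustrative sketch: marking a 16-page region read-only, with writes
 * discarded (<dom> and <pfn> are hypothetical):
 *
 *     struct xen_hvm_set_mem_type m = {
 *         .domid = dom,
 *         .hvmmem_type = HVMMEM_ram_ro,
 *         .nr = 16,
 *         .first_pfn = pfn,
 *     };
 *     HYPERVISOR_hvm_op(HVMOP_set_mem_type, &m);
 */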

#endif /* defined(__XEN__) || defined(__XEN_TOOLS__) */

/* Hint from PV drivers for pagetable destruction. */
#define HVMOP_pagetable_dying        9
struct xen_hvm_pagetable_dying {
    /* Domain with a pagetable about to be destroyed. */
    domid_t  domid;
    uint16_t pad[3]; /* align next field on 8-byte boundary */
    /* guest physical address of the toplevel pagetable dying */
    uint64_t gpa;
};
typedef struct xen_hvm_pagetable_dying xen_hvm_pagetable_dying_t;
DEFINE_XEN_GUEST_HANDLE(xen_hvm_pagetable_dying_t);

/* Get the current Xen time, in nanoseconds since system boot. */
#define HVMOP_get_time              10
struct xen_hvm_get_time {
    uint64_t now;      /* OUT */
};
typedef struct xen_hvm_get_time xen_hvm_get_time_t;
DEFINE_XEN_GUEST_HANDLE(xen_hvm_get_time_t);

#define HVMOP_xentrace              11
struct xen_hvm_xentrace {
    uint16_t event, extra_bytes;
    uint8_t extra[TRACE_EXTRA_MAX * sizeof(uint32_t)];
};
typedef struct xen_hvm_xentrace xen_hvm_xentrace_t;
DEFINE_XEN_GUEST_HANDLE(xen_hvm_xentrace_t);

/* Following tools-only interfaces may change in future. */
#if defined(__XEN__) || defined(__XEN_TOOLS__)

/* Deprecated by XENMEM_access_op_set_access */
#define HVMOP_set_mem_access        12

/* Deprecated by XENMEM_access_op_get_access */
#define HVMOP_get_mem_access        13

#define HVMOP_inject_trap            14
/* Inject a trap into a VCPU, to be delivered the next time the VCPU is
 * scheduled. Note that the caller should know enough of the state of the
 * CPU before injecting, to know what the effect of injecting the trap
 * will be.
 */
struct xen_hvm_inject_trap {
    /* Domain to be queried. */
    domid_t domid;
    /* VCPU */
    uint32_t vcpuid;
    /* Vector number */
    uint32_t vector;
    /* Trap type (HVMOP_TRAP_*) */
    uint32_t type;
/* NB. This enumeration precisely matches hvm.h:X86_EVENTTYPE_* */
# define HVMOP_TRAP_ext_int    0 /* external interrupt */
# define HVMOP_TRAP_nmi        2 /* nmi */
# define HVMOP_TRAP_hw_exc     3 /* hardware exception */
# define HVMOP_TRAP_sw_int     4 /* software interrupt (CD nn) */
# define HVMOP_TRAP_pri_sw_exc 5 /* ICEBP (F1) */
# define HVMOP_TRAP_sw_exc     6 /* INT3 (CC), INTO (CE) */
    /* Error code, or ~0u to skip */
    uint32_t error_code;
    /* Instruction length */
    uint32_t insn_len;
    /* CR2 for page faults */
    uint64_aligned_t cr2;
};
typedef struct xen_hvm_inject_trap xen_hvm_inject_trap_t;
DEFINE_XEN_GUEST_HANDLE(xen_hvm_inject_trap_t);
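
/*
 * Illustrative sketch: injecting a page fault (x86 vector 14, a hardware
 * exception). cr2 is only meaningful for page faults; error_code may be
 * ~0u to skip delivering one (<dom>, <vcpu>, <ec> and <fault_addr> are
 * hypothetical):
 *
 *     struct xen_hvm_inject_trap t = {
 *         .domid = dom, .vcpuid = vcpu,
 *         .vector = 14,
 *         .type = HVMOP_TRAP_hw_exc,
 *         .error_code = ec,
 *         .insn_len = 0,
 *         .cr2 = fault_addr,
 *     };
 *     HYPERVISOR_hvm_op(HVMOP_inject_trap, &t);
 */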

#endif /* defined(__XEN__) || defined(__XEN_TOOLS__) */

#define HVMOP_get_mem_type    15
/* Return hvmmem_type_t for the specified pfn. */
struct xen_hvm_get_mem_type {
    /* Domain to be queried. */
    domid_t domid;
    /* OUT variable. */
    uint16_t mem_type;
    uint16_t pad[2]; /* align next field on 8-byte boundary */
    /* IN variable. */
    uint64_t pfn;
};
typedef struct xen_hvm_get_mem_type xen_hvm_get_mem_type_t;
DEFINE_XEN_GUEST_HANDLE(xen_hvm_get_mem_type_t);
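
/*
 * Illustrative sketch: querying the type of a single guest pfn:
 *
 *     struct xen_hvm_get_mem_type q = { .domid = dom, .pfn = pfn };
 *     if ( HYPERVISOR_hvm_op(HVMOP_get_mem_type, &q) == 0 &&
 *          q.mem_type == HVMMEM_mmio_dm )
 *         ... accesses to this pfn are forwarded to a device model ...
 */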

/* Following tools-only interfaces may change in future. */
#if defined(__XEN__) || defined(__XEN_TOOLS__)

/* MSI injection for emulated devices. */
#define HVMOP_inject_msi         16
struct xen_hvm_inject_msi {
    /* Domain into which the MSI is injected. */
    domid_t   domid;
    /* Data -- lower 32 bits. */
    uint32_t  data;
    /* Address (0xfeexxxxx). */
    uint64_t  addr;
};
typedef struct xen_hvm_inject_msi xen_hvm_inject_msi_t;
DEFINE_XEN_GUEST_HANDLE(xen_hvm_inject_msi_t);
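
/*
 * Illustrative sketch: injecting an MSI using the usual x86 address/data
 * encoding (destination APIC ID 0, fixed delivery, vector 0x71 here; the
 * particular encoding is an assumption of this example):
 *
 *     struct xen_hvm_inject_msi msi = {
 *         .domid = dom,
 *         .addr  = 0xfee00000,
 *         .data  = 0x0071,
 *     };
 *     HYPERVISOR_hvm_op(HVMOP_inject_msi, &msi);
 */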

/*
 * IOREQ Servers
 *
 * The interface between an I/O emulator and Xen is called an IOREQ Server.
 * A domain supports a single 'legacy' IOREQ Server which is instantiated if
 * parameter...
 *
 * HVM_PARAM_IOREQ_PFN is read (to get the gmfn containing the synchronous
 * ioreq structures), or...
 * HVM_PARAM_BUFIOREQ_PFN is read (to get the gmfn containing the buffered
 * ioreq ring), or...
 * HVM_PARAM_BUFIOREQ_EVTCHN is read (to get the event channel that Xen uses
 * to request buffered I/O emulation).
 *
 * The following hypercalls facilitate the creation of IOREQ Servers for
 * 'secondary' emulators which are invoked to implement port I/O, memory, or
 * PCI config space ranges which they explicitly register.
 */

typedef uint16_t ioservid_t;

/*
 * HVMOP_create_ioreq_server: Instantiate a new IOREQ Server for a secondary
 *                            emulator servicing domain <domid>.
 *
 * The <id> handed back is unique for <domid>. If <handle_bufioreq> is zero
 * the buffered ioreq ring will not be allocated and hence all emulation
 * requests to this server will be synchronous.
 */
#define HVMOP_create_ioreq_server 17
struct xen_hvm_create_ioreq_server {
    domid_t domid;           /* IN - domain to be serviced */
#define HVM_IOREQSRV_BUFIOREQ_OFF    0
#define HVM_IOREQSRV_BUFIOREQ_LEGACY 1
/*
 * Use this when read_pointer gets updated atomically and
 * the pointer pair gets read atomically:
 */
#define HVM_IOREQSRV_BUFIOREQ_ATOMIC 2
    uint8_t handle_bufioreq; /* IN - should server handle buffered ioreqs */
    ioservid_t id;           /* OUT - server id */
};
typedef struct xen_hvm_create_ioreq_server xen_hvm_create_ioreq_server_t;
DEFINE_XEN_GUEST_HANDLE(xen_hvm_create_ioreq_server_t);

/*
 * HVMOP_get_ioreq_server_info: Get all the information necessary to access
 *                              IOREQ Server <id>.
 *
 * The emulator needs to map the synchronous ioreq structures and buffered
 * ioreq ring (if it exists) that Xen uses to request emulation. These are
 * hosted in domain <domid>'s gmfns <ioreq_pfn> and <bufioreq_pfn>
 * respectively. In addition, if the IOREQ Server is handling buffered
 * emulation requests, the emulator needs to bind to event channel
 * <bufioreq_port> to listen for them. (The event channels used for
 * synchronous emulation requests are specified in the per-CPU ioreq
 * structures in <ioreq_pfn>.)
 * If the IOREQ Server is not handling buffered emulation requests then the
 * values handed back in <bufioreq_pfn> and <bufioreq_port> will both be 0.
 */
#define HVMOP_get_ioreq_server_info 18
struct xen_hvm_get_ioreq_server_info {
    domid_t domid;                 /* IN - domain to be serviced */
    ioservid_t id;                 /* IN - server id */
    evtchn_port_t bufioreq_port;   /* OUT - buffered ioreq port */
    uint64_aligned_t ioreq_pfn;    /* OUT - sync ioreq pfn */
    uint64_aligned_t bufioreq_pfn; /* OUT - buffered ioreq pfn */
};
typedef struct xen_hvm_get_ioreq_server_info xen_hvm_get_ioreq_server_info_t;
DEFINE_XEN_GUEST_HANDLE(xen_hvm_get_ioreq_server_info_t);

/*
 * HVMOP_map_io_range_to_ioreq_server: Register an I/O range of domain <domid>
 *                                     for emulation by the client of IOREQ
 *                                     Server <id>.
 * HVMOP_unmap_io_range_from_ioreq_server: Deregister an I/O range of <domid>
 *                                         for emulation by the client of
 *                                         IOREQ Server <id>.
 *
 * There are three types of I/O that can be emulated: port I/O, memory accesses
 * and PCI config space accesses. The <type> field denotes which type of range
 * the <start> and <end> (inclusive) fields are specifying.
 * PCI config space ranges are specified by segment/bus/device/function values
 * which should be encoded using the HVMOP_PCI_SBDF helper macro below.
 *
 * NOTE: unless an emulation request falls entirely within a range mapped
 * by a secondary emulator, it will not be passed to that emulator.
 */
#define HVMOP_map_io_range_to_ioreq_server 19
#define HVMOP_unmap_io_range_from_ioreq_server 20
struct xen_hvm_io_range {
    domid_t domid;               /* IN - domain to be serviced */
    ioservid_t id;               /* IN - server id */
    uint32_t type;               /* IN - type of range */
# define HVMOP_IO_RANGE_PORT   0 /* I/O port range */
# define HVMOP_IO_RANGE_MEMORY 1 /* MMIO range */
# define HVMOP_IO_RANGE_PCI    2 /* PCI segment/bus/dev/func range */
    uint64_aligned_t start, end; /* IN - inclusive start and end of range */
};
typedef struct xen_hvm_io_range xen_hvm_io_range_t;
DEFINE_XEN_GUEST_HANDLE(xen_hvm_io_range_t);

#define HVMOP_PCI_SBDF(s,b,d,f)                 \
    ((((s) & 0xffff) << 16) |                   \
     (((b) & 0xff) << 8) |                      \
     (((d) & 0x1f) << 3) |                      \
     ((f) & 0x07))
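
/*
 * Illustrative sketch: HVMOP_PCI_SBDF(0, 0, 3, 0) packs segment 0, bus 0,
 * device 3, function 0 into (0 << 16) | (0 << 8) | (3 << 3) | 0 == 0x18.
 * Registering that single function for emulation by server <id>:
 *
 *     struct xen_hvm_io_range r = {
 *         .domid = dom, .id = id,
 *         .type  = HVMOP_IO_RANGE_PCI,
 *         .start = HVMOP_PCI_SBDF(0, 0, 3, 0),
 *         .end   = HVMOP_PCI_SBDF(0, 0, 3, 0),
 *     };
 *     HYPERVISOR_hvm_op(HVMOP_map_io_range_to_ioreq_server, &r);
 */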

/*
 * HVMOP_destroy_ioreq_server: Destroy the IOREQ Server <id> servicing domain
 *                             <domid>.
 *
 * Any registered I/O ranges will be automatically deregistered.
 */
#define HVMOP_destroy_ioreq_server 21
struct xen_hvm_destroy_ioreq_server {
    domid_t domid; /* IN - domain to be serviced */
    ioservid_t id; /* IN - server id */
};
typedef struct xen_hvm_destroy_ioreq_server xen_hvm_destroy_ioreq_server_t;
DEFINE_XEN_GUEST_HANDLE(xen_hvm_destroy_ioreq_server_t);

/*
 * HVMOP_set_ioreq_server_state: Enable or disable the IOREQ Server <id>
 *                               servicing domain <domid>.
 *
 * The IOREQ Server will not be passed any emulation requests until it is in
 * the enabled state.
 * Note that the contents of the ioreq_pfn and bufioreq_pfn (see
 * HVMOP_get_ioreq_server_info) are not meaningful until the IOREQ Server is
 * in the enabled state.
 */
#define HVMOP_set_ioreq_server_state 22
struct xen_hvm_set_ioreq_server_state {
    domid_t domid;   /* IN - domain to be serviced */
    ioservid_t id;   /* IN - server id */
    uint8_t enabled; /* IN - enabled? */
};
typedef struct xen_hvm_set_ioreq_server_state xen_hvm_set_ioreq_server_state_t;
DEFINE_XEN_GUEST_HANDLE(xen_hvm_set_ioreq_server_state_t);
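
/*
 * Illustrative sketch of the secondary-emulator lifecycle described above,
 * with error handling and the mapping of ioreq_pfn/bufioreq_pfn (and the
 * binding of bufioreq_port) omitted:
 *
 *     struct xen_hvm_create_ioreq_server c = {
 *         .domid = dom, .handle_bufioreq = HVM_IOREQSRV_BUFIOREQ_ATOMIC,
 *     };
 *     HYPERVISOR_hvm_op(HVMOP_create_ioreq_server, &c);
 *
 *     struct xen_hvm_get_ioreq_server_info i = {
 *         .domid = dom, .id = c.id,
 *     };
 *     HYPERVISOR_hvm_op(HVMOP_get_ioreq_server_info, &i);
 *
 *     struct xen_hvm_set_ioreq_server_state s = {
 *         .domid = dom, .id = c.id, .enabled = 1,
 *     };
 *     HYPERVISOR_hvm_op(HVMOP_set_ioreq_server_state, &s);
 */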

#endif /* defined(__XEN__) || defined(__XEN_TOOLS__) */

#if defined(__i386__) || defined(__x86_64__)

/*
 * HVMOP_set_evtchn_upcall_vector: Set a <vector> that should be used for
 *                                 event channel upcalls on the specified
 *                                 <vcpu>. If set, this vector will be used
 *                                 in preference to the domain-global
 *                                 callback (see HVM_PARAM_CALLBACK_IRQ).
 */
#define HVMOP_set_evtchn_upcall_vector 23
struct xen_hvm_evtchn_upcall_vector {
    uint32_t vcpu;
    uint8_t vector;
};
typedef struct xen_hvm_evtchn_upcall_vector xen_hvm_evtchn_upcall_vector_t;
DEFINE_XEN_GUEST_HANDLE(xen_hvm_evtchn_upcall_vector_t);
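
/*
 * Illustrative sketch: directing event channel upcalls for VCPU 0 to
 * vector 0xf0 (the guest must itself install a handler for that vector):
 *
 *     struct xen_hvm_evtchn_upcall_vector uv = {
 *         .vcpu = 0, .vector = 0xf0,
 *     };
 *     HYPERVISOR_hvm_op(HVMOP_set_evtchn_upcall_vector, &uv);
 */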

#endif /* defined(__i386__) || defined(__x86_64__) */

#define HVMOP_guest_request_vm_event 24

/* HVMOP_altp2m: perform altp2m state operations */
#define HVMOP_altp2m 25

#define HVMOP_ALTP2M_INTERFACE_VERSION 0x00000001

struct xen_hvm_altp2m_domain_state {
    /* IN/OUT variable: on/off */
    uint8_t state;
};
typedef struct xen_hvm_altp2m_domain_state xen_hvm_altp2m_domain_state_t;
DEFINE_XEN_GUEST_HANDLE(xen_hvm_altp2m_domain_state_t);

struct xen_hvm_altp2m_vcpu_enable_notify {
    uint32_t vcpu_id;
    uint32_t pad;
    /* #VE info area gfn */
    uint64_t gfn;
};
typedef struct xen_hvm_altp2m_vcpu_enable_notify xen_hvm_altp2m_vcpu_enable_notify_t;
DEFINE_XEN_GUEST_HANDLE(xen_hvm_altp2m_vcpu_enable_notify_t);

struct xen_hvm_altp2m_view {
    /* IN/OUT variable */
    uint16_t view;
    /* Create view only: default access type.
     * NOTE: currently ignored. */
    uint16_t hvmmem_default_access; /* xenmem_access_t */
};
typedef struct xen_hvm_altp2m_view xen_hvm_altp2m_view_t;
DEFINE_XEN_GUEST_HANDLE(xen_hvm_altp2m_view_t);

struct xen_hvm_altp2m_set_mem_access {
    /* view */
    uint16_t view;
    /* Memory type */
    uint16_t hvmmem_access; /* xenmem_access_t */
    uint32_t pad;
    /* gfn */
    uint64_t gfn;
};
typedef struct xen_hvm_altp2m_set_mem_access xen_hvm_altp2m_set_mem_access_t;
DEFINE_XEN_GUEST_HANDLE(xen_hvm_altp2m_set_mem_access_t);

struct xen_hvm_altp2m_change_gfn {
    /* view */
    uint16_t view;
    uint16_t pad1;
    uint32_t pad2;
    /* old gfn */
    uint64_t old_gfn;
    /* new gfn, INVALID_GFN (~0UL) means revert */
    uint64_t new_gfn;
};
typedef struct xen_hvm_altp2m_change_gfn xen_hvm_altp2m_change_gfn_t;
DEFINE_XEN_GUEST_HANDLE(xen_hvm_altp2m_change_gfn_t);

struct xen_hvm_altp2m_op {
    uint32_t version;   /* HVMOP_ALTP2M_INTERFACE_VERSION */
    uint32_t cmd;
/* Get/set the altp2m state for a domain */
#define HVMOP_altp2m_get_domain_state     1
#define HVMOP_altp2m_set_domain_state     2
/* Set the current VCPU to receive altp2m event notifications */
#define HVMOP_altp2m_vcpu_enable_notify   3
/* Create a new view */
#define HVMOP_altp2m_create_p2m           4
/* Destroy a view */
#define HVMOP_altp2m_destroy_p2m          5
/* Switch view for an entire domain */
#define HVMOP_altp2m_switch_p2m           6
/* Notify that a page of memory is to have specific access types */
#define HVMOP_altp2m_set_mem_access       7
/* Change a p2m entry to have a different gfn->mfn mapping */
#define HVMOP_altp2m_change_gfn           8
    domid_t domain;
    uint16_t pad1;
    uint32_t pad2;
    union {
        struct xen_hvm_altp2m_domain_state       domain_state;
        struct xen_hvm_altp2m_vcpu_enable_notify enable_notify;
        struct xen_hvm_altp2m_view               view;
        struct xen_hvm_altp2m_set_mem_access     set_mem_access;
        struct xen_hvm_altp2m_change_gfn         change_gfn;
        uint8_t pad[64];
    } u;
};
typedef struct xen_hvm_altp2m_op xen_hvm_altp2m_op_t;
DEFINE_XEN_GUEST_HANDLE(xen_hvm_altp2m_op_t);
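
/*
 * Illustrative sketch: querying whether altp2m is active for a domain:
 *
 *     struct xen_hvm_altp2m_op a = {
 *         .version = HVMOP_ALTP2M_INTERFACE_VERSION,
 *         .cmd     = HVMOP_altp2m_get_domain_state,
 *         .domain  = dom,
 *     };
 *     if ( HYPERVISOR_hvm_op(HVMOP_altp2m, &a) == 0 )
 *         active = a.u.domain_state.state;
 */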

#endif /* __XEN_PUBLIC_HVM_HVM_OP_H__ */

/*
 * Local variables:
 * mode: C
 * c-file-style: "BSD"
 * c-basic-offset: 4
 * tab-width: 4
 * indent-tabs-mode: nil
 * End:
 */