1/******************************************************************************
2 * domctl.h
3 *
4 * Domain management operations. For use by node control stack.
5 *
6 * Permission is hereby granted, free of charge, to any person obtaining a copy
7 * of this software and associated documentation files (the "Software"), to
8 * deal in the Software without restriction, including without limitation the
9 * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
10 * sell copies of the Software, and to permit persons to whom the Software is
11 * furnished to do so, subject to the following conditions:
12 *
13 * The above copyright notice and this permission notice shall be included in
14 * all copies or substantial portions of the Software.
15 *
16 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
17 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
18 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
19 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
20 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
21 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
22 * DEALINGS IN THE SOFTWARE.
23 *
24 * Copyright (c) 2002-2003, B Dragovic
25 * Copyright (c) 2002-2006, K Fraser
26 */
27
28#ifndef __XEN_PUBLIC_DOMCTL_H__
29#define __XEN_PUBLIC_DOMCTL_H__
30
31#if !defined(__XEN__) && !defined(__XEN_TOOLS__)
32#error "domctl operations are intended for use by node control tools only"
33#endif
34
35#include "xen.h"
36#include "grant_table.h"
37#include "hvm/save.h"
38#include "memory.h"
39
40#define XEN_DOMCTL_INTERFACE_VERSION 0x0000000b
41
42/*
43 * NB. xen_domctl.domain is an IN/OUT parameter for this operation.
44 * If it is specified as zero, an id is auto-allocated and returned.
45 */
46/* XEN_DOMCTL_createdomain */
47struct xen_domctl_createdomain {
48    /* IN parameters */
49    uint32_t ssidref;
50    xen_domain_handle_t handle;
51 /* Is this an HVM guest (as opposed to a PVH or PV guest)? */
52#define _XEN_DOMCTL_CDF_hvm_guest     0
53#define XEN_DOMCTL_CDF_hvm_guest      (1U<<_XEN_DOMCTL_CDF_hvm_guest)
54 /* Use hardware-assisted paging if available? */
55#define _XEN_DOMCTL_CDF_hap           1
56#define XEN_DOMCTL_CDF_hap            (1U<<_XEN_DOMCTL_CDF_hap)
 /* Should domain memory integrity be verified by tboot during Sx? */
58#define _XEN_DOMCTL_CDF_s3_integrity  2
59#define XEN_DOMCTL_CDF_s3_integrity   (1U<<_XEN_DOMCTL_CDF_s3_integrity)
60 /* Disable out-of-sync shadow page tables? */
61#define _XEN_DOMCTL_CDF_oos_off       3
62#define XEN_DOMCTL_CDF_oos_off        (1U<<_XEN_DOMCTL_CDF_oos_off)
63 /* Is this a PVH guest (as opposed to an HVM or PV guest)? */
64#define _XEN_DOMCTL_CDF_pvh_guest     4
65#define XEN_DOMCTL_CDF_pvh_guest      (1U<<_XEN_DOMCTL_CDF_pvh_guest)
66    uint32_t flags;
67    struct xen_arch_domainconfig config;
68};
69typedef struct xen_domctl_createdomain xen_domctl_createdomain_t;
70DEFINE_XEN_GUEST_HANDLE(xen_domctl_createdomain_t);
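
/*
 * Illustrative sketch (not part of the interface): filling in a minimal
 * XEN_DOMCTL_createdomain request.  Setting .domain to 0 asks Xen to
 * auto-allocate an id, which is echoed back in the same field on success.
 * "do_domctl()" stands in for whatever hypercall wrapper the control stack
 * provides (e.g. xc_domctl() in libxc) and is an assumption of this sketch,
 * not something defined here.
 *
 *     struct xen_domctl domctl = {
 *         .cmd                  = XEN_DOMCTL_createdomain,
 *         .interface_version    = XEN_DOMCTL_INTERFACE_VERSION,
 *         .domain               = 0,
 *         .u.createdomain.flags = XEN_DOMCTL_CDF_hvm_guest | XEN_DOMCTL_CDF_hap,
 *     };
 *
 *     if ( do_domctl(&domctl) == 0 )
 *         new_domid = domctl.domain;
 */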
71
72/* XEN_DOMCTL_getdomaininfo */
73struct xen_domctl_getdomaininfo {
74    /* OUT variables. */
75    domid_t  domain;              /* Also echoed in domctl.domain */
76 /* Domain is scheduled to die. */
77#define _XEN_DOMINF_dying     0
78#define XEN_DOMINF_dying      (1U<<_XEN_DOMINF_dying)
79 /* Domain is an HVM guest (as opposed to a PV guest). */
80#define _XEN_DOMINF_hvm_guest 1
81#define XEN_DOMINF_hvm_guest  (1U<<_XEN_DOMINF_hvm_guest)
82 /* The guest OS has shut down. */
83#define _XEN_DOMINF_shutdown  2
84#define XEN_DOMINF_shutdown   (1U<<_XEN_DOMINF_shutdown)
85 /* Currently paused by control software. */
86#define _XEN_DOMINF_paused    3
87#define XEN_DOMINF_paused     (1U<<_XEN_DOMINF_paused)
88 /* Currently blocked pending an event.     */
89#define _XEN_DOMINF_blocked   4
90#define XEN_DOMINF_blocked    (1U<<_XEN_DOMINF_blocked)
91 /* Domain is currently running.            */
92#define _XEN_DOMINF_running   5
93#define XEN_DOMINF_running    (1U<<_XEN_DOMINF_running)
94 /* Being debugged.  */
95#define _XEN_DOMINF_debugged  6
96#define XEN_DOMINF_debugged   (1U<<_XEN_DOMINF_debugged)
97/* domain is PVH */
98#define _XEN_DOMINF_pvh_guest 7
99#define XEN_DOMINF_pvh_guest  (1U<<_XEN_DOMINF_pvh_guest)
100 /* XEN_DOMINF_shutdown guest-supplied code.  */
101#define XEN_DOMINF_shutdownmask 255
102#define XEN_DOMINF_shutdownshift 16
103    uint32_t flags;              /* XEN_DOMINF_* */
104    uint64_aligned_t tot_pages;
105    uint64_aligned_t max_pages;
106    uint64_aligned_t outstanding_pages;
107    uint64_aligned_t shr_pages;
108    uint64_aligned_t paged_pages;
109    uint64_aligned_t shared_info_frame; /* GMFN of shared_info struct */
110    uint64_aligned_t cpu_time;
111    uint32_t nr_online_vcpus;    /* Number of VCPUs currently online. */
112#define XEN_INVALID_MAX_VCPU_ID (~0U) /* Domain has no vcpus? */
113    uint32_t max_vcpu_id;        /* Maximum VCPUID in use by this domain. */
114    uint32_t ssidref;
115    xen_domain_handle_t handle;
116    uint32_t cpupool;
117};
118typedef struct xen_domctl_getdomaininfo xen_domctl_getdomaininfo_t;
119DEFINE_XEN_GUEST_HANDLE(xen_domctl_getdomaininfo_t);
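
/*
 * Illustrative sketch (hedged, not part of the interface): decoding the
 * flags word returned by XEN_DOMCTL_getdomaininfo.  The guest-supplied
 * shutdown reason sits above bit XEN_DOMINF_shutdownshift.
 *
 *     const struct xen_domctl_getdomaininfo *info = &domctl.u.getdomaininfo;
 *     int is_dying  = !!(info->flags & XEN_DOMINF_dying);
 *     int has_shut  = !!(info->flags & XEN_DOMINF_shutdown);
 *     unsigned int shutdown_code =
 *         (info->flags >> XEN_DOMINF_shutdownshift) & XEN_DOMINF_shutdownmask;
 */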
120
121
122/* XEN_DOMCTL_getmemlist */
123struct xen_domctl_getmemlist {
124    /* IN variables. */
125    /* Max entries to write to output buffer. */
126    uint64_aligned_t max_pfns;
127    /* Start index in guest's page list. */
128    uint64_aligned_t start_pfn;
129    XEN_GUEST_HANDLE_64(uint64) buffer;
130    /* OUT variables. */
131    uint64_aligned_t num_pfns;
132};
133typedef struct xen_domctl_getmemlist xen_domctl_getmemlist_t;
134DEFINE_XEN_GUEST_HANDLE(xen_domctl_getmemlist_t);
135
136
137/* XEN_DOMCTL_getpageframeinfo */
138
139#define XEN_DOMCTL_PFINFO_LTAB_SHIFT 28
140#define XEN_DOMCTL_PFINFO_NOTAB   (0x0U<<28)
141#define XEN_DOMCTL_PFINFO_L1TAB   (0x1U<<28)
142#define XEN_DOMCTL_PFINFO_L2TAB   (0x2U<<28)
143#define XEN_DOMCTL_PFINFO_L3TAB   (0x3U<<28)
144#define XEN_DOMCTL_PFINFO_L4TAB   (0x4U<<28)
145#define XEN_DOMCTL_PFINFO_LTABTYPE_MASK (0x7U<<28)
146#define XEN_DOMCTL_PFINFO_LPINTAB (0x1U<<31)
147#define XEN_DOMCTL_PFINFO_XTAB    (0xfU<<28) /* invalid page */
148#define XEN_DOMCTL_PFINFO_XALLOC  (0xeU<<28) /* allocate-only page */
149#define XEN_DOMCTL_PFINFO_BROKEN  (0xdU<<28) /* broken page */
150#define XEN_DOMCTL_PFINFO_LTAB_MASK (0xfU<<28)
151
152/* XEN_DOMCTL_getpageframeinfo3 */
153struct xen_domctl_getpageframeinfo3 {
154    /* IN variables. */
155    uint64_aligned_t num;
156    /* IN/OUT variables. */
157    XEN_GUEST_HANDLE_64(xen_pfn_t) array;
158};
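
/*
 * Illustrative note: each entry of 'array' holds a frame number on input
 * and is returned with its XEN_DOMCTL_PFINFO_* type encoded in the top
 * nibble, so a caller can recover the type with e.g.:
 *
 *     unsigned long type = entry & XEN_DOMCTL_PFINFO_LTAB_MASK;
 *
 * where XEN_DOMCTL_PFINFO_XTAB marks an invalid page and the L1TAB..L4TAB
 * values identify pagetable pages.
 */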
159
160
161/*
162 * Control shadow pagetables operation
163 */
164/* XEN_DOMCTL_shadow_op */
165
166/* Disable shadow mode. */
167#define XEN_DOMCTL_SHADOW_OP_OFF         0
168
169/* Enable shadow mode (mode contains ORed XEN_DOMCTL_SHADOW_ENABLE_* flags). */
170#define XEN_DOMCTL_SHADOW_OP_ENABLE      32
171
172/* Log-dirty bitmap operations. */
173 /* Return the bitmap and clean internal copy for next round. */
174#define XEN_DOMCTL_SHADOW_OP_CLEAN       11
175 /* Return the bitmap but do not modify internal copy. */
176#define XEN_DOMCTL_SHADOW_OP_PEEK        12
177
178/* Memory allocation accessors. */
179#define XEN_DOMCTL_SHADOW_OP_GET_ALLOCATION   30
180#define XEN_DOMCTL_SHADOW_OP_SET_ALLOCATION   31
181
182/* Legacy enable operations. */
183 /* Equiv. to ENABLE with no mode flags. */
184#define XEN_DOMCTL_SHADOW_OP_ENABLE_TEST       1
185 /* Equiv. to ENABLE with mode flag ENABLE_LOG_DIRTY. */
186#define XEN_DOMCTL_SHADOW_OP_ENABLE_LOGDIRTY   2
187 /* Equiv. to ENABLE with mode flags ENABLE_REFCOUNT and ENABLE_TRANSLATE. */
188#define XEN_DOMCTL_SHADOW_OP_ENABLE_TRANSLATE  3
189
190/* Mode flags for XEN_DOMCTL_SHADOW_OP_ENABLE. */
191 /*
192  * Shadow pagetables are refcounted: guest does not use explicit mmu
193  * operations nor write-protect its pagetables.
194  */
195#define XEN_DOMCTL_SHADOW_ENABLE_REFCOUNT  (1 << 1)
196 /*
197  * Log pages in a bitmap as they are dirtied.
198  * Used for live relocation to determine which pages must be re-sent.
199  */
200#define XEN_DOMCTL_SHADOW_ENABLE_LOG_DIRTY (1 << 2)
201 /*
202  * Automatically translate GPFNs into MFNs.
203  */
204#define XEN_DOMCTL_SHADOW_ENABLE_TRANSLATE (1 << 3)
205 /*
206  * Xen does not steal virtual address space from the guest.
207  * Requires HVM support.
208  */
209#define XEN_DOMCTL_SHADOW_ENABLE_EXTERNAL  (1 << 4)
210
211struct xen_domctl_shadow_op_stats {
212    uint32_t fault_count;
213    uint32_t dirty_count;
214};
215typedef struct xen_domctl_shadow_op_stats xen_domctl_shadow_op_stats_t;
216DEFINE_XEN_GUEST_HANDLE(xen_domctl_shadow_op_stats_t);
217
218struct xen_domctl_shadow_op {
219    /* IN variables. */
220    uint32_t       op;       /* XEN_DOMCTL_SHADOW_OP_* */
221
222    /* OP_ENABLE */
223    uint32_t       mode;     /* XEN_DOMCTL_SHADOW_ENABLE_* */
224
225    /* OP_GET_ALLOCATION / OP_SET_ALLOCATION */
226    uint32_t       mb;       /* Shadow memory allocation in MB */
227
228    /* OP_PEEK / OP_CLEAN */
229    XEN_GUEST_HANDLE_64(uint8) dirty_bitmap;
230    uint64_aligned_t pages; /* Size of buffer. Updated with actual size. */
231    struct xen_domctl_shadow_op_stats stats;
232};
233typedef struct xen_domctl_shadow_op xen_domctl_shadow_op_t;
234DEFINE_XEN_GUEST_HANDLE(xen_domctl_shadow_op_t);
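
/*
 * Illustrative sketch (assumptions noted here, not a prescribed sequence):
 * one log-dirty round as a live-migration tool might issue it.
 * "dirty_bits" is assumed to be a caller-allocated bitmap of at least
 * 'nr_pages' bits, "do_domctl()" an assumed hypercall wrapper, and
 * set_xen_guest_handle() the usual helper for filling a guest handle.
 *
 *     struct xen_domctl domctl = { 0 };
 *
 *     domctl.cmd               = XEN_DOMCTL_shadow_op;
 *     domctl.interface_version = XEN_DOMCTL_INTERFACE_VERSION;
 *     domctl.domain            = domid;
 *     domctl.u.shadow_op.op    = XEN_DOMCTL_SHADOW_OP_CLEAN;
 *     domctl.u.shadow_op.pages = nr_pages;
 *     set_xen_guest_handle(domctl.u.shadow_op.dirty_bitmap, dirty_bits);
 *
 *     do_domctl(&domctl);
 *
 * On return, u.shadow_op.pages holds the size actually written and
 * u.shadow_op.stats reports the fault/dirty counts for the round.
 */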
235
236
237/* XEN_DOMCTL_max_mem */
238struct xen_domctl_max_mem {
239    /* IN variables. */
240    uint64_aligned_t max_memkb;
241};
242typedef struct xen_domctl_max_mem xen_domctl_max_mem_t;
243DEFINE_XEN_GUEST_HANDLE(xen_domctl_max_mem_t);
244
245
246/* XEN_DOMCTL_setvcpucontext */
247/* XEN_DOMCTL_getvcpucontext */
248struct xen_domctl_vcpucontext {
249    uint32_t              vcpu;                  /* IN */
250    XEN_GUEST_HANDLE_64(vcpu_guest_context_t) ctxt; /* IN/OUT */
251};
252typedef struct xen_domctl_vcpucontext xen_domctl_vcpucontext_t;
253DEFINE_XEN_GUEST_HANDLE(xen_domctl_vcpucontext_t);
254
255
256/* XEN_DOMCTL_getvcpuinfo */
257struct xen_domctl_getvcpuinfo {
258    /* IN variables. */
259    uint32_t vcpu;
260    /* OUT variables. */
261    uint8_t  online;                  /* currently online (not hotplugged)? */
262    uint8_t  blocked;                 /* blocked waiting for an event? */
263    uint8_t  running;                 /* currently scheduled on its CPU? */
264    uint64_aligned_t cpu_time;        /* total cpu time consumed (ns) */
265    uint32_t cpu;                     /* current mapping   */
266};
267typedef struct xen_domctl_getvcpuinfo xen_domctl_getvcpuinfo_t;
268DEFINE_XEN_GUEST_HANDLE(xen_domctl_getvcpuinfo_t);
269
270
/* Get/set the NUMA node(s) with which the guest has affinity. */
272/* XEN_DOMCTL_setnodeaffinity */
273/* XEN_DOMCTL_getnodeaffinity */
274struct xen_domctl_nodeaffinity {
275    struct xenctl_bitmap nodemap;/* IN */
276};
277typedef struct xen_domctl_nodeaffinity xen_domctl_nodeaffinity_t;
278DEFINE_XEN_GUEST_HANDLE(xen_domctl_nodeaffinity_t);
279
280
281/* Get/set which physical cpus a vcpu can execute on. */
282/* XEN_DOMCTL_setvcpuaffinity */
283/* XEN_DOMCTL_getvcpuaffinity */
284struct xen_domctl_vcpuaffinity {
285    /* IN variables. */
286    uint32_t  vcpu;
287 /* Set/get the hard affinity for vcpu */
288#define _XEN_VCPUAFFINITY_HARD  0
289#define XEN_VCPUAFFINITY_HARD   (1U<<_XEN_VCPUAFFINITY_HARD)
290 /* Set/get the soft affinity for vcpu */
291#define _XEN_VCPUAFFINITY_SOFT  1
292#define XEN_VCPUAFFINITY_SOFT   (1U<<_XEN_VCPUAFFINITY_SOFT)
293    uint32_t flags;
294    /*
295     * IN/OUT variables.
296     *
     * Both are IN/OUT for XEN_DOMCTL_setvcpuaffinity, in which case they
     * contain the effective hard and/or soft affinity. That is, upon
     * successful return, cpumap_soft contains the intersection of the soft
     * affinity, the hard affinity and the cpupool's online CPUs for the
     * domain (if XEN_VCPUAFFINITY_SOFT was set in flags), and cpumap_hard
     * contains the intersection of the hard affinity and the cpupool's
     * online CPUs (if XEN_VCPUAFFINITY_HARD was set in flags).
304     *
305     * Both are OUT-only for XEN_DOMCTL_getvcpuaffinity, in which case they
306     * contain the plain hard and/or soft affinity masks that were set during
307     * previous successful calls to XEN_DOMCTL_setvcpuaffinity (or the
308     * default values), without intersecting or altering them in any way.
309     */
310    struct xenctl_bitmap cpumap_hard;
311    struct xenctl_bitmap cpumap_soft;
312};
313typedef struct xen_domctl_vcpuaffinity xen_domctl_vcpuaffinity_t;
314DEFINE_XEN_GUEST_HANDLE(xen_domctl_vcpuaffinity_t);
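
/*
 * Illustrative sketch: setting both affinities in one call.  Filling the
 * xenctl_bitmap fields is elided here (it uses the normal bitmap helpers);
 * the point is that both flag bits may be ORed together, and on return each
 * cpumap reflects the effective, intersected affinity described above.
 *
 *     domctl.cmd                  = XEN_DOMCTL_setvcpuaffinity;
 *     domctl.u.vcpuaffinity.vcpu  = vcpu_id;
 *     domctl.u.vcpuaffinity.flags = XEN_VCPUAFFINITY_HARD |
 *                                   XEN_VCPUAFFINITY_SOFT;
 */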
315
316
317/* XEN_DOMCTL_max_vcpus */
318struct xen_domctl_max_vcpus {
319    uint32_t max;           /* maximum number of vcpus */
320};
321typedef struct xen_domctl_max_vcpus xen_domctl_max_vcpus_t;
322DEFINE_XEN_GUEST_HANDLE(xen_domctl_max_vcpus_t);
323
324
325/* XEN_DOMCTL_scheduler_op */
326/* Scheduler types. */
327/* #define XEN_SCHEDULER_SEDF  4 (Removed) */
328#define XEN_SCHEDULER_CREDIT   5
329#define XEN_SCHEDULER_CREDIT2  6
330#define XEN_SCHEDULER_ARINC653 7
331#define XEN_SCHEDULER_RTDS     8
332
333/* Set or get info? */
334#define XEN_DOMCTL_SCHEDOP_putinfo 0
335#define XEN_DOMCTL_SCHEDOP_getinfo 1
336struct xen_domctl_scheduler_op {
337    uint32_t sched_id;  /* XEN_SCHEDULER_* */
338    uint32_t cmd;       /* XEN_DOMCTL_SCHEDOP_* */
339    union {
340        struct xen_domctl_sched_credit {
341            uint16_t weight;
342            uint16_t cap;
343        } credit;
344        struct xen_domctl_sched_credit2 {
345            uint16_t weight;
346        } credit2;
347        struct xen_domctl_sched_rtds {
348            uint32_t period;
349            uint32_t budget;
350        } rtds;
351    } u;
352};
353typedef struct xen_domctl_scheduler_op xen_domctl_scheduler_op_t;
354DEFINE_XEN_GUEST_HANDLE(xen_domctl_scheduler_op_t);
355
356
357/* XEN_DOMCTL_setdomainhandle */
358struct xen_domctl_setdomainhandle {
359    xen_domain_handle_t handle;
360};
361typedef struct xen_domctl_setdomainhandle xen_domctl_setdomainhandle_t;
362DEFINE_XEN_GUEST_HANDLE(xen_domctl_setdomainhandle_t);
363
364
365/* XEN_DOMCTL_setdebugging */
366struct xen_domctl_setdebugging {
367    uint8_t enable;
368};
369typedef struct xen_domctl_setdebugging xen_domctl_setdebugging_t;
370DEFINE_XEN_GUEST_HANDLE(xen_domctl_setdebugging_t);
371
372
373/* XEN_DOMCTL_irq_permission */
374struct xen_domctl_irq_permission {
375    uint8_t pirq;
376    uint8_t allow_access;    /* flag to specify enable/disable of IRQ access */
377};
378typedef struct xen_domctl_irq_permission xen_domctl_irq_permission_t;
379DEFINE_XEN_GUEST_HANDLE(xen_domctl_irq_permission_t);
380
381
382/* XEN_DOMCTL_iomem_permission */
383struct xen_domctl_iomem_permission {
384    uint64_aligned_t first_mfn;/* first page (physical page number) in range */
385    uint64_aligned_t nr_mfns;  /* number of pages in range (>0) */
386    uint8_t  allow_access;     /* allow (!0) or deny (0) access to range? */
387};
388typedef struct xen_domctl_iomem_permission xen_domctl_iomem_permission_t;
389DEFINE_XEN_GUEST_HANDLE(xen_domctl_iomem_permission_t);
390
391
392/* XEN_DOMCTL_ioport_permission */
393struct xen_domctl_ioport_permission {
    uint32_t first_port;              /* first port in range */
395    uint32_t nr_ports;                /* size of port range */
396    uint8_t  allow_access;            /* allow or deny access to range? */
397};
398typedef struct xen_domctl_ioport_permission xen_domctl_ioport_permission_t;
399DEFINE_XEN_GUEST_HANDLE(xen_domctl_ioport_permission_t);
400
401
402/* XEN_DOMCTL_hypercall_init */
403struct xen_domctl_hypercall_init {
404    uint64_aligned_t  gmfn;           /* GMFN to be initialised */
405};
406typedef struct xen_domctl_hypercall_init xen_domctl_hypercall_init_t;
407DEFINE_XEN_GUEST_HANDLE(xen_domctl_hypercall_init_t);
408
409
410/* XEN_DOMCTL_settimeoffset */
411struct xen_domctl_settimeoffset {
412    int64_aligned_t time_offset_seconds; /* applied to domain wallclock time */
413};
414typedef struct xen_domctl_settimeoffset xen_domctl_settimeoffset_t;
415DEFINE_XEN_GUEST_HANDLE(xen_domctl_settimeoffset_t);
416
417/* XEN_DOMCTL_gethvmcontext */
418/* XEN_DOMCTL_sethvmcontext */
419typedef struct xen_domctl_hvmcontext {
420    uint32_t size; /* IN/OUT: size of buffer / bytes filled */
421    XEN_GUEST_HANDLE_64(uint8) buffer; /* IN/OUT: data, or call
422                                        * gethvmcontext with NULL
423                                        * buffer to get size req'd */
424} xen_domctl_hvmcontext_t;
425DEFINE_XEN_GUEST_HANDLE(xen_domctl_hvmcontext_t);
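
/*
 * Illustrative sketch of the two-call pattern mentioned above for
 * XEN_DOMCTL_gethvmcontext: first call with a NULL buffer to learn the
 * required size, then allocate and call again.  "do_domctl()" is an assumed
 * hypercall wrapper, not part of this interface.
 *
 *     domctl.cmd               = XEN_DOMCTL_gethvmcontext;
 *     domctl.u.hvmcontext.size = 0;
 *     set_xen_guest_handle(domctl.u.hvmcontext.buffer, NULL);
 *     do_domctl(&domctl);                    (size is filled in on return)
 *
 *     buf = malloc(domctl.u.hvmcontext.size);
 *     set_xen_guest_handle(domctl.u.hvmcontext.buffer, buf);
 *     do_domctl(&domctl);                    (buffer now holds the context)
 */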
426
427
428/* XEN_DOMCTL_set_address_size */
429/* XEN_DOMCTL_get_address_size */
430typedef struct xen_domctl_address_size {
431    uint32_t size;
432} xen_domctl_address_size_t;
433DEFINE_XEN_GUEST_HANDLE(xen_domctl_address_size_t);
434
435
436/* XEN_DOMCTL_sendtrigger */
437#define XEN_DOMCTL_SENDTRIGGER_NMI    0
438#define XEN_DOMCTL_SENDTRIGGER_RESET  1
439#define XEN_DOMCTL_SENDTRIGGER_INIT   2
440#define XEN_DOMCTL_SENDTRIGGER_POWER  3
441#define XEN_DOMCTL_SENDTRIGGER_SLEEP  4
442struct xen_domctl_sendtrigger {
443    uint32_t  trigger;  /* IN */
444    uint32_t  vcpu;     /* IN */
445};
446typedef struct xen_domctl_sendtrigger xen_domctl_sendtrigger_t;
447DEFINE_XEN_GUEST_HANDLE(xen_domctl_sendtrigger_t);
448
449
450/* Assign a device to a guest. Sets up IOMMU structures. */
451/* XEN_DOMCTL_assign_device */
452/* XEN_DOMCTL_test_assign_device */
453/*
 * XEN_DOMCTL_deassign_device: The behavior of this DOMCTL differs
 * between the different types of device:
 *  - A PCI device (XEN_DOMCTL_DEV_PCI) will be reassigned to DOM0.
 *  - A DT device (XEN_DOMCTL_DEV_DT) will be left unassigned. DOM0
 *    will have to call XEN_DOMCTL_assign_device in order to use the
 *    device.
460 */
461#define XEN_DOMCTL_DEV_PCI      0
462#define XEN_DOMCTL_DEV_DT       1
463struct xen_domctl_assign_device {
464    uint32_t dev;   /* XEN_DOMCTL_DEV_* */
465    union {
466        struct {
467            uint32_t machine_sbdf;   /* machine PCI ID of assigned device */
468        } pci;
469        struct {
470            uint32_t size; /* Length of the path */
471            XEN_GUEST_HANDLE_64(char) path; /* path to the device tree node */
472        } dt;
473    } u;
474    /* IN */
475#define XEN_DOMCTL_DEV_RDM_RELAXED      1
476    uint32_t  flag;   /* flag of assigned device */
477};
478typedef struct xen_domctl_assign_device xen_domctl_assign_device_t;
479DEFINE_XEN_GUEST_HANDLE(xen_domctl_assign_device_t);
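
/*
 * Illustrative note: for XEN_DOMCTL_DEV_PCI devices, machine_sbdf is
 * assumed here to use the conventional segment/bus/device/function packing
 * employed by the tool stack, i.e. something along the lines of:
 *
 *     machine_sbdf = (segment << 16) | (bus << 8) | (device << 3) | function;
 */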
480
/* Retrieve sibling device information for machine_sbdf */
482/* XEN_DOMCTL_get_device_group */
483struct xen_domctl_get_device_group {
484    uint32_t  machine_sbdf;     /* IN */
485    uint32_t  max_sdevs;        /* IN */
486    uint32_t  num_sdevs;        /* OUT */
487    XEN_GUEST_HANDLE_64(uint32)  sdev_array;   /* OUT */
488};
489typedef struct xen_domctl_get_device_group xen_domctl_get_device_group_t;
490DEFINE_XEN_GUEST_HANDLE(xen_domctl_get_device_group_t);
491
492/* Pass-through interrupts: bind real irq -> hvm devfn. */
493/* XEN_DOMCTL_bind_pt_irq */
494/* XEN_DOMCTL_unbind_pt_irq */
495typedef enum pt_irq_type_e {
496    PT_IRQ_TYPE_PCI,
497    PT_IRQ_TYPE_ISA,
498    PT_IRQ_TYPE_MSI,
499    PT_IRQ_TYPE_MSI_TRANSLATE,
500    PT_IRQ_TYPE_SPI,    /* ARM: valid range 32-1019 */
501} pt_irq_type_t;
502struct xen_domctl_bind_pt_irq {
503    uint32_t machine_irq;
504    pt_irq_type_t irq_type;
505    uint32_t hvm_domid;
506
507    union {
508        struct {
509            uint8_t isa_irq;
510        } isa;
511        struct {
512            uint8_t bus;
513            uint8_t device;
514            uint8_t intx;
515        } pci;
516        struct {
517            uint8_t gvec;
518            uint32_t gflags;
519            uint64_aligned_t gtable;
520        } msi;
521        struct {
522            uint16_t spi;
523        } spi;
524    } u;
525};
526typedef struct xen_domctl_bind_pt_irq xen_domctl_bind_pt_irq_t;
527DEFINE_XEN_GUEST_HANDLE(xen_domctl_bind_pt_irq_t);
528
529
530/* Bind machine I/O address range -> HVM address range. */
/* If this returns -E2BIG, retry with a lower nr_mfns value. */
532/* XEN_DOMCTL_memory_mapping */
533#define DPCI_ADD_MAPPING         1
534#define DPCI_REMOVE_MAPPING      0
535struct xen_domctl_memory_mapping {
536    uint64_aligned_t first_gfn; /* first page (hvm guest phys page) in range */
537    uint64_aligned_t first_mfn; /* first page (machine page) in range */
538    uint64_aligned_t nr_mfns;   /* number of pages in range (>0) */
539    uint32_t add_mapping;       /* add or remove mapping */
540    uint32_t padding;           /* padding for 64-bit aligned structure */
541};
542typedef struct xen_domctl_memory_mapping xen_domctl_memory_mapping_t;
543DEFINE_XEN_GUEST_HANDLE(xen_domctl_memory_mapping_t);
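
/*
 * Illustrative sketch of acting on -E2BIG as suggested above: retry with a
 * smaller batch until the whole range is mapped.  This is only a plausible
 * caller-side loop ("do_domctl()" is an assumed wrapper), not a prescribed
 * algorithm.
 *
 *     while ( remaining )
 *     {
 *         uint64_t nr = batch < remaining ? batch : remaining;
 *
 *         domctl.u.memory_mapping.first_gfn   = gfn;
 *         domctl.u.memory_mapping.first_mfn   = mfn;
 *         domctl.u.memory_mapping.nr_mfns     = nr;
 *         domctl.u.memory_mapping.add_mapping = DPCI_ADD_MAPPING;
 *
 *         rc = do_domctl(&domctl);
 *         if ( rc == -E2BIG && batch > 1 ) { batch /= 2; continue; }
 *         if ( rc ) break;
 *
 *         gfn += nr; mfn += nr; remaining -= nr;
 *     }
 */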
544
545
546/* Bind machine I/O port range -> HVM I/O port range. */
547/* XEN_DOMCTL_ioport_mapping */
548struct xen_domctl_ioport_mapping {
    uint32_t first_gport;     /* first guest IO port */
550    uint32_t first_mport;     /* first machine IO port */
551    uint32_t nr_ports;        /* size of port range */
552    uint32_t add_mapping;     /* add or remove mapping */
553};
554typedef struct xen_domctl_ioport_mapping xen_domctl_ioport_mapping_t;
555DEFINE_XEN_GUEST_HANDLE(xen_domctl_ioport_mapping_t);
556
557
558/*
559 * Pin caching type of RAM space for x86 HVM domU.
560 */
561/* XEN_DOMCTL_pin_mem_cacheattr */
562/* Caching types: these happen to be the same as x86 MTRR/PAT type codes. */
563#define XEN_DOMCTL_MEM_CACHEATTR_UC  0
564#define XEN_DOMCTL_MEM_CACHEATTR_WC  1
565#define XEN_DOMCTL_MEM_CACHEATTR_WT  4
566#define XEN_DOMCTL_MEM_CACHEATTR_WP  5
567#define XEN_DOMCTL_MEM_CACHEATTR_WB  6
568#define XEN_DOMCTL_MEM_CACHEATTR_UCM 7
569#define XEN_DOMCTL_DELETE_MEM_CACHEATTR (~(uint32_t)0)
570struct xen_domctl_pin_mem_cacheattr {
571    uint64_aligned_t start, end;
572    uint32_t type; /* XEN_DOMCTL_MEM_CACHEATTR_* */
573};
574typedef struct xen_domctl_pin_mem_cacheattr xen_domctl_pin_mem_cacheattr_t;
575DEFINE_XEN_GUEST_HANDLE(xen_domctl_pin_mem_cacheattr_t);
576
577
578/* XEN_DOMCTL_set_ext_vcpucontext */
579/* XEN_DOMCTL_get_ext_vcpucontext */
580struct xen_domctl_ext_vcpucontext {
581    /* IN: VCPU that this call applies to. */
582    uint32_t         vcpu;
583    /*
584     * SET: Size of struct (IN)
585     * GET: Size of struct (OUT, up to 128 bytes)
586     */
587    uint32_t         size;
588#if defined(__i386__) || defined(__x86_64__)
589    /* SYSCALL from 32-bit mode and SYSENTER callback information. */
590    /* NB. SYSCALL from 64-bit mode is contained in vcpu_guest_context_t */
591    uint64_aligned_t syscall32_callback_eip;
592    uint64_aligned_t sysenter_callback_eip;
593    uint16_t         syscall32_callback_cs;
594    uint16_t         sysenter_callback_cs;
595    uint8_t          syscall32_disables_events;
596    uint8_t          sysenter_disables_events;
597#if defined(__GNUC__)
598    union {
599        uint64_aligned_t mcg_cap;
600        struct hvm_vmce_vcpu vmce;
601    };
602#else
603    struct hvm_vmce_vcpu vmce;
604#endif
605#endif
606};
607typedef struct xen_domctl_ext_vcpucontext xen_domctl_ext_vcpucontext_t;
608DEFINE_XEN_GUEST_HANDLE(xen_domctl_ext_vcpucontext_t);
609
610/*
611 * Set the target domain for a domain
612 */
613/* XEN_DOMCTL_set_target */
614struct xen_domctl_set_target {
615    domid_t target;
616};
617typedef struct xen_domctl_set_target xen_domctl_set_target_t;
618DEFINE_XEN_GUEST_HANDLE(xen_domctl_set_target_t);
619
620#if defined(__i386__) || defined(__x86_64__)
621# define XEN_CPUID_INPUT_UNUSED  0xFFFFFFFF
622/* XEN_DOMCTL_set_cpuid */
623struct xen_domctl_cpuid {
624  uint32_t input[2];
625  uint32_t eax;
626  uint32_t ebx;
627  uint32_t ecx;
628  uint32_t edx;
629};
630typedef struct xen_domctl_cpuid xen_domctl_cpuid_t;
631DEFINE_XEN_GUEST_HANDLE(xen_domctl_cpuid_t);
632#endif
633
634/*
635 * Arranges that if the domain suspends (specifically, if it shuts
636 * down with code SHUTDOWN_suspend), this event channel will be
637 * notified.
638 *
639 * This is _instead of_ the usual notification to the global
 * VIRQ_DOM_EXC.  (In most systems that virq is owned by xenstored.)
641 *
642 * Only one subscription per domain is possible.  Last subscriber
643 * wins; others are silently displaced.
644 *
645 * NB that contrary to the rather general name, it only applies to
646 * domain shutdown with code suspend.  Shutdown for other reasons
647 * (including crash), and domain death, are notified to VIRQ_DOM_EXC
648 * regardless.
649 */
650/* XEN_DOMCTL_subscribe */
651struct xen_domctl_subscribe {
652    uint32_t port; /* IN */
653};
654typedef struct xen_domctl_subscribe xen_domctl_subscribe_t;
655DEFINE_XEN_GUEST_HANDLE(xen_domctl_subscribe_t);
656
657/*
658 * Define the maximum machine address size which should be allocated
659 * to a guest.
660 */
661/* XEN_DOMCTL_set_machine_address_size */
662/* XEN_DOMCTL_get_machine_address_size */
663
664/*
665 * Do not inject spurious page faults into this domain.
666 */
667/* XEN_DOMCTL_suppress_spurious_page_faults */
668
669/* XEN_DOMCTL_debug_op */
670#define XEN_DOMCTL_DEBUG_OP_SINGLE_STEP_OFF         0
671#define XEN_DOMCTL_DEBUG_OP_SINGLE_STEP_ON          1
672struct xen_domctl_debug_op {
673    uint32_t op;   /* IN */
674    uint32_t vcpu; /* IN */
675};
676typedef struct xen_domctl_debug_op xen_domctl_debug_op_t;
677DEFINE_XEN_GUEST_HANDLE(xen_domctl_debug_op_t);
678
679/*
680 * Request a particular record from the HVM context
681 */
682/* XEN_DOMCTL_gethvmcontext_partial */
683typedef struct xen_domctl_hvmcontext_partial {
684    uint32_t type;                      /* IN: Type of record required */
685    uint32_t instance;                  /* IN: Instance of that type */
686    XEN_GUEST_HANDLE_64(uint8) buffer;  /* OUT: buffer to write record into */
687} xen_domctl_hvmcontext_partial_t;
688DEFINE_XEN_GUEST_HANDLE(xen_domctl_hvmcontext_partial_t);
689
690/* XEN_DOMCTL_disable_migrate */
691typedef struct xen_domctl_disable_migrate {
692    uint32_t disable; /* IN: 1: disable migration and restore */
693} xen_domctl_disable_migrate_t;
694
695
696/* XEN_DOMCTL_gettscinfo */
697/* XEN_DOMCTL_settscinfo */
698typedef struct xen_domctl_tsc_info {
699    /* IN/OUT */
700    uint32_t tsc_mode;
701    uint32_t gtsc_khz;
702    uint32_t incarnation;
703    uint32_t pad;
704    uint64_aligned_t elapsed_nsec;
705} xen_domctl_tsc_info_t;
706
707/* XEN_DOMCTL_gdbsx_guestmemio      guest mem io */
708struct xen_domctl_gdbsx_memio {
709    /* IN */
710    uint64_aligned_t pgd3val;/* optional: init_mm.pgd[3] value */
711    uint64_aligned_t gva;    /* guest virtual address */
712    uint64_aligned_t uva;    /* user buffer virtual address */
713    uint32_t         len;    /* number of bytes to read/write */
714    uint8_t          gwr;    /* 0 = read from guest. 1 = write to guest */
715    /* OUT */
716    uint32_t         remain; /* bytes remaining to be copied */
717};
718
719/* XEN_DOMCTL_gdbsx_pausevcpu */
720/* XEN_DOMCTL_gdbsx_unpausevcpu */
721struct xen_domctl_gdbsx_pauseunp_vcpu { /* pause/unpause a vcpu */
722    uint32_t         vcpu;         /* which vcpu */
723};
724
725/* XEN_DOMCTL_gdbsx_domstatus */
726struct xen_domctl_gdbsx_domstatus {
727    /* OUT */
728    uint8_t          paused;     /* is the domain paused */
729    uint32_t         vcpu_id;    /* any vcpu in an event? */
730    uint32_t         vcpu_ev;    /* if yes, what event? */
731};
732
733/*
734 * VM event operations
735 */
736
737/* XEN_DOMCTL_vm_event_op */
738
739/*
740 * There are currently three rings available for VM events:
741 * sharing, monitor and paging. This hypercall allows one to
742 * control these rings (enable/disable), as well as to signal
743 * to the hypervisor to pull responses (resume) from the given
744 * ring.
745 */
746#define XEN_VM_EVENT_ENABLE               0
747#define XEN_VM_EVENT_DISABLE              1
748#define XEN_VM_EVENT_RESUME               2
749
750/*
751 * Domain memory paging
752 * Page memory in and out.
753 * Domctl interface to set up and tear down the
754 * pager<->hypervisor interface. Use XENMEM_paging_op*
755 * to perform per-page operations.
756 *
757 * The XEN_VM_EVENT_PAGING_ENABLE domctl returns several
758 * non-standard error codes to indicate why paging could not be enabled:
759 * ENODEV - host lacks HAP support (EPT/NPT) or HAP is disabled in guest
760 * EMLINK - guest has iommu passthrough enabled
761 * EXDEV  - guest has PoD enabled
762 * EBUSY  - guest has or had paging enabled, ring buffer still active
763 */
764#define XEN_DOMCTL_VM_EVENT_OP_PAGING            1
765
766/*
767 * Monitor helper.
768 *
769 * As with paging, use the domctl for teardown/setup of the
770 * helper<->hypervisor interface.
771 *
 * The monitor interface can be used to register for various VM events. For
 * example, there are HVM hypercalls to set the per-page access permissions
 * of every page in a domain.  When one of these permissions (read, write or
 * execute, each controlled independently) is violated, the VCPU is paused
 * and a memory event describing what happened is sent. The memory event
 * handler can then resume the VCPU and redo the access with a
 * XEN_VM_EVENT_RESUME option.
778 *
779 * See public/vm_event.h for the list of available events that can be
780 * subscribed to via the monitor interface.
781 *
 * The XEN_VM_EVENT_MONITOR_* domctls return
783 * non-standard error codes to indicate why access could not be enabled:
784 * ENODEV - host lacks HAP support (EPT/NPT) or HAP is disabled in guest
785 * EBUSY  - guest has or had access enabled, ring buffer still active
786 *
787 */
788#define XEN_DOMCTL_VM_EVENT_OP_MONITOR           2
789
790/*
791 * Sharing ENOMEM helper.
792 *
793 * As with paging, use the domctl for teardown/setup of the
794 * helper<->hypervisor interface.
795 *
 * If set up, this ring is used to communicate failed allocations
 * in the unshare path. XENMEM_sharing_op_resume is used to wake up
 * vcpus that could not unshare.
 *
 * Note that sharing can be turned on (as per the domctl below)
 * *without* this ring being set up.
802 */
803#define XEN_DOMCTL_VM_EVENT_OP_SHARING           3
804
/* Used for teardown/setup of the helper<->hypervisor interface for paging,
 * access and sharing. */
807struct xen_domctl_vm_event_op {
808    uint32_t       op;           /* XEN_VM_EVENT_* */
809    uint32_t       mode;         /* XEN_DOMCTL_VM_EVENT_OP_* */
810
811    uint32_t port;              /* OUT: event channel for ring */
812};
813typedef struct xen_domctl_vm_event_op xen_domctl_vm_event_op_t;
814DEFINE_XEN_GUEST_HANDLE(xen_domctl_vm_event_op_t);
815
816/*
817 * Memory sharing operations
818 */
819/* XEN_DOMCTL_mem_sharing_op.
820 * The CONTROL sub-domctl is used for bringup/teardown. */
821#define XEN_DOMCTL_MEM_SHARING_CONTROL          0
822
823struct xen_domctl_mem_sharing_op {
824    uint8_t op; /* XEN_DOMCTL_MEM_SHARING_* */
825
826    union {
827        uint8_t enable;                   /* CONTROL */
828    } u;
829};
830typedef struct xen_domctl_mem_sharing_op xen_domctl_mem_sharing_op_t;
831DEFINE_XEN_GUEST_HANDLE(xen_domctl_mem_sharing_op_t);
832
833struct xen_domctl_audit_p2m {
834    /* OUT error counts */
835    uint64_t orphans;
836    uint64_t m2p_bad;
837    uint64_t p2m_bad;
838};
839typedef struct xen_domctl_audit_p2m xen_domctl_audit_p2m_t;
840DEFINE_XEN_GUEST_HANDLE(xen_domctl_audit_p2m_t);
841
842struct xen_domctl_set_virq_handler {
843    uint32_t virq; /* IN */
844};
845typedef struct xen_domctl_set_virq_handler xen_domctl_set_virq_handler_t;
846DEFINE_XEN_GUEST_HANDLE(xen_domctl_set_virq_handler_t);
847
848#if defined(__i386__) || defined(__x86_64__)
849/* XEN_DOMCTL_setvcpuextstate */
850/* XEN_DOMCTL_getvcpuextstate */
851struct xen_domctl_vcpuextstate {
852    /* IN: VCPU that this call applies to. */
853    uint32_t         vcpu;
854    /*
855     * SET: Ignored.
856     * GET: xfeature support mask of struct (IN/OUT)
     * The xfeature mask serves as an identification of the saving format,
     * so that compatible CPUs can check the format to decide whether a
     * restore is possible.
860     */
861    uint64_aligned_t         xfeature_mask;
862    /*
863     * SET: Size of struct (IN)
864     * GET: Size of struct (IN/OUT)
865     */
866    uint64_aligned_t         size;
867    XEN_GUEST_HANDLE_64(uint64) buffer;
868};
869typedef struct xen_domctl_vcpuextstate xen_domctl_vcpuextstate_t;
870DEFINE_XEN_GUEST_HANDLE(xen_domctl_vcpuextstate_t);
871#endif
872
/* XEN_DOMCTL_set_access_required: sets whether a memory event listener
 * must be present to handle page access events. If false, the page
 * access will revert to full permissions if no one is listening.
 */
877struct xen_domctl_set_access_required {
878    uint8_t access_required;
879};
880typedef struct xen_domctl_set_access_required xen_domctl_set_access_required_t;
881DEFINE_XEN_GUEST_HANDLE(xen_domctl_set_access_required_t);
882
883struct xen_domctl_set_broken_page_p2m {
884    uint64_aligned_t pfn;
885};
886typedef struct xen_domctl_set_broken_page_p2m xen_domctl_set_broken_page_p2m_t;
887DEFINE_XEN_GUEST_HANDLE(xen_domctl_set_broken_page_p2m_t);
888
889/*
890 * XEN_DOMCTL_set_max_evtchn: sets the maximum event channel port
 * number the guest may use.  Use this to limit the amount of resources
892 * (global mapping space, xenheap) a guest may use for event channels.
893 */
894struct xen_domctl_set_max_evtchn {
895    uint32_t max_port;
896};
897typedef struct xen_domctl_set_max_evtchn xen_domctl_set_max_evtchn_t;
898DEFINE_XEN_GUEST_HANDLE(xen_domctl_set_max_evtchn_t);
899
900/*
901 * ARM: Clean and invalidate caches associated with given region of
902 * guest memory.
903 */
904struct xen_domctl_cacheflush {
905    /* IN: page range to flush. */
906    xen_pfn_t start_pfn, nr_pfns;
907};
908typedef struct xen_domctl_cacheflush xen_domctl_cacheflush_t;
909DEFINE_XEN_GUEST_HANDLE(xen_domctl_cacheflush_t);
910
911#if defined(__i386__) || defined(__x86_64__)
912struct xen_domctl_vcpu_msr {
913    uint32_t         index;
914    uint32_t         reserved;
915    uint64_aligned_t value;
916};
917typedef struct xen_domctl_vcpu_msr xen_domctl_vcpu_msr_t;
918DEFINE_XEN_GUEST_HANDLE(xen_domctl_vcpu_msr_t);
919
920/*
921 * XEN_DOMCTL_set_vcpu_msrs / XEN_DOMCTL_get_vcpu_msrs.
922 *
923 * Input:
924 * - A NULL 'msrs' guest handle is a request for the maximum 'msr_count'.
925 * - Otherwise, 'msr_count' is the number of entries in 'msrs'.
926 *
927 * Output for get:
928 * - If 'msr_count' is less than the number Xen needs to write, -ENOBUFS shall
929 *   be returned and 'msr_count' updated to reflect the intended number.
930 * - On success, 'msr_count' shall indicate the number of MSRs written, which
931 *   may be less than the maximum if some are not currently used by the vcpu.
932 *
933 * Output for set:
934 * - If Xen encounters an error with a specific MSR, -EINVAL shall be returned
935 *   and 'msr_count' shall be set to the offending index, to aid debugging.
936 */
937struct xen_domctl_vcpu_msrs {
938    uint32_t vcpu;                                   /* IN     */
939    uint32_t msr_count;                              /* IN/OUT */
940    XEN_GUEST_HANDLE_64(xen_domctl_vcpu_msr_t) msrs; /* IN/OUT */
941};
942typedef struct xen_domctl_vcpu_msrs xen_domctl_vcpu_msrs_t;
943DEFINE_XEN_GUEST_HANDLE(xen_domctl_vcpu_msrs_t);
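
/*
 * Illustrative sketch of the query pattern described above: pass a NULL
 * handle to learn the maximum count, then allocate that many entries and
 * fetch them.  "do_domctl()" is an assumed hypercall wrapper.
 *
 *     domctl.cmd                   = XEN_DOMCTL_get_vcpu_msrs;
 *     domctl.u.vcpu_msrs.vcpu      = vcpu_id;
 *     domctl.u.vcpu_msrs.msr_count = 0;
 *     set_xen_guest_handle(domctl.u.vcpu_msrs.msrs, NULL);
 *     do_domctl(&domctl);              (msr_count now holds the maximum)
 *
 *     msrs = calloc(domctl.u.vcpu_msrs.msr_count, sizeof(*msrs));
 *     set_xen_guest_handle(domctl.u.vcpu_msrs.msrs, msrs);
 *     do_domctl(&domctl);              (msr_count now holds entries written)
 */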
944#endif
945
946/* XEN_DOMCTL_setvnumainfo: specifies a virtual NUMA topology for the guest */
947struct xen_domctl_vnuma {
948    /* IN: number of vNUMA nodes to setup. Shall be greater than 0 */
949    uint32_t nr_vnodes;
950    /* IN: number of memory ranges to setup */
951    uint32_t nr_vmemranges;
952    /*
953     * IN: number of vCPUs of the domain (used as size of the vcpu_to_vnode
954     * array declared below). Shall be equal to the domain's max_vcpus.
955     */
956    uint32_t nr_vcpus;
957    uint32_t pad;                                  /* must be zero */
958
959    /*
     * IN: array for specifying the distances between the vNUMA nodes.
     * Shall have nr_vnodes*nr_vnodes elements.
962     */
963    XEN_GUEST_HANDLE_64(uint) vdistance;
964    /*
965     * IN: array for specifying to what vNUMA node each vCPU belongs.
966     * Shall have nr_vcpus elements.
967     */
968    XEN_GUEST_HANDLE_64(uint) vcpu_to_vnode;
969    /*
970     * IN: array for specifying on what physical NUMA node each vNUMA
971     * node is placed. Shall have nr_vnodes elements.
972     */
973    XEN_GUEST_HANDLE_64(uint) vnode_to_pnode;
974    /*
975     * IN: array for specifying the memory ranges. Shall have
976     * nr_vmemranges elements.
977     */
978    XEN_GUEST_HANDLE_64(xen_vmemrange_t) vmemrange;
979};
980typedef struct xen_domctl_vnuma xen_domctl_vnuma_t;
981DEFINE_XEN_GUEST_HANDLE(xen_domctl_vnuma_t);
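
/*
 * Illustrative example of the sizing rules above for a two-node layout
 * (values are purely an example, nothing here is prescribed):
 *
 *     nr_vnodes     = 2
 *     nr_vmemranges = 2
 *     nr_vcpus      = the domain's max_vcpus
 *
 *     vdistance     : 2*2 entries, e.g. { 10, 20, 20, 10 }
 *     vcpu_to_vnode : nr_vcpus entries, each 0 or 1
 *     vnode_to_pnode: 2 entries, the physical node backing each vnode
 *     vmemrange     : 2 xen_vmemrange_t entries, one memory range per vnode
 */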
982
983struct xen_domctl_psr_cmt_op {
984#define XEN_DOMCTL_PSR_CMT_OP_DETACH         0
985#define XEN_DOMCTL_PSR_CMT_OP_ATTACH         1
986#define XEN_DOMCTL_PSR_CMT_OP_QUERY_RMID     2
987    uint32_t cmd;
988    uint32_t data;
989};
990typedef struct xen_domctl_psr_cmt_op xen_domctl_psr_cmt_op_t;
991DEFINE_XEN_GUEST_HANDLE(xen_domctl_psr_cmt_op_t);
992
993/*  XEN_DOMCTL_MONITOR_*
994 *
995 * Enable/disable monitoring various VM events.
996 * This domctl configures what events will be reported to helper apps
 * via the "MONITOR" ring buffer. The ring has to be enabled first
 * with the XEN_DOMCTL_VM_EVENT_OP_MONITOR domctl.
999 *
1000 * GET_CAPABILITIES can be used to determine which of these features is
1001 * available on a given platform.
1002 *
1003 * NOTICE: mem_access events are also delivered via the "MONITOR" ring buffer;
1004 * however, enabling/disabling those events is performed with the use of
1005 * memory_op hypercalls!
1006 */
1007#define XEN_DOMCTL_MONITOR_OP_ENABLE            0
1008#define XEN_DOMCTL_MONITOR_OP_DISABLE           1
1009#define XEN_DOMCTL_MONITOR_OP_GET_CAPABILITIES  2
1010
1011#define XEN_DOMCTL_MONITOR_EVENT_WRITE_CTRLREG         0
1012#define XEN_DOMCTL_MONITOR_EVENT_MOV_TO_MSR            1
1013#define XEN_DOMCTL_MONITOR_EVENT_SINGLESTEP            2
1014#define XEN_DOMCTL_MONITOR_EVENT_SOFTWARE_BREAKPOINT   3
1015#define XEN_DOMCTL_MONITOR_EVENT_GUEST_REQUEST         4
1016
1017struct xen_domctl_monitor_op {
1018    uint32_t op; /* XEN_DOMCTL_MONITOR_OP_* */
1019
1020    /*
1021     * When used with ENABLE/DISABLE this has to be set to
1022     * the requested XEN_DOMCTL_MONITOR_EVENT_* value.
1023     * With GET_CAPABILITIES this field returns a bitmap of
1024     * events supported by the platform, in the format
1025     * (1 << XEN_DOMCTL_MONITOR_EVENT_*).
1026     */
1027    uint32_t event;
1028
1029    /*
1030     * Further options when issuing XEN_DOMCTL_MONITOR_OP_ENABLE.
1031     */
1032    union {
1033        struct {
1034            /* Which control register */
1035            uint8_t index;
1036            /* Pause vCPU until response */
1037            uint8_t sync;
1038            /* Send event only on a change of value */
1039            uint8_t onchangeonly;
1040        } mov_to_cr;
1041
1042        struct {
1043            /* Enable the capture of an extended set of MSRs */
1044            uint8_t extended_capture;
1045        } mov_to_msr;
1046
1047        struct {
1048            /* Pause vCPU until response */
1049            uint8_t sync;
1050        } guest_request;
1051    } u;
1052};
1053typedef struct xen_domctl_monitor_op xen_domctl_monitor_op_t;
1054DEFINE_XEN_GUEST_HANDLE(xen_domctl_monitor_op_t);
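
/*
 * Illustrative sketch: query the capability bitmap, then enable one event
 * type if the platform supports it.  "do_domctl()" is an assumed hypercall
 * wrapper; VM_EVENT_X86_CR3 is the control-register selector defined in
 * public/vm_event.h.
 *
 *     domctl.cmd             = XEN_DOMCTL_monitor_op;
 *     domctl.u.monitor_op.op = XEN_DOMCTL_MONITOR_OP_GET_CAPABILITIES;
 *     do_domctl(&domctl);
 *
 *     if ( domctl.u.monitor_op.event &
 *          (1U << XEN_DOMCTL_MONITOR_EVENT_WRITE_CTRLREG) )
 *     {
 *         domctl.u.monitor_op.op    = XEN_DOMCTL_MONITOR_OP_ENABLE;
 *         domctl.u.monitor_op.event = XEN_DOMCTL_MONITOR_EVENT_WRITE_CTRLREG;
 *         domctl.u.monitor_op.u.mov_to_cr.index        = VM_EVENT_X86_CR3;
 *         domctl.u.monitor_op.u.mov_to_cr.sync         = 1;
 *         domctl.u.monitor_op.u.mov_to_cr.onchangeonly = 1;
 *         do_domctl(&domctl);
 *     }
 */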
1055
1056struct xen_domctl_psr_cat_op {
1057#define XEN_DOMCTL_PSR_CAT_OP_SET_L3_CBM     0
1058#define XEN_DOMCTL_PSR_CAT_OP_GET_L3_CBM     1
1059    uint32_t cmd;       /* IN: XEN_DOMCTL_PSR_CAT_OP_* */
1060    uint32_t target;    /* IN */
1061    uint64_t data;      /* IN/OUT */
1062};
1063typedef struct xen_domctl_psr_cat_op xen_domctl_psr_cat_op_t;
1064DEFINE_XEN_GUEST_HANDLE(xen_domctl_psr_cat_op_t);
1065
1066struct xen_domctl {
1067    uint32_t cmd;
1068#define XEN_DOMCTL_createdomain                   1
1069#define XEN_DOMCTL_destroydomain                  2
1070#define XEN_DOMCTL_pausedomain                    3
1071#define XEN_DOMCTL_unpausedomain                  4
1072#define XEN_DOMCTL_getdomaininfo                  5
1073#define XEN_DOMCTL_getmemlist                     6
1074/* #define XEN_DOMCTL_getpageframeinfo            7 Obsolete - use getpageframeinfo3 */
1075/* #define XEN_DOMCTL_getpageframeinfo2           8 Obsolete - use getpageframeinfo3 */
1076#define XEN_DOMCTL_setvcpuaffinity                9
1077#define XEN_DOMCTL_shadow_op                     10
1078#define XEN_DOMCTL_max_mem                       11
1079#define XEN_DOMCTL_setvcpucontext                12
1080#define XEN_DOMCTL_getvcpucontext                13
1081#define XEN_DOMCTL_getvcpuinfo                   14
1082#define XEN_DOMCTL_max_vcpus                     15
1083#define XEN_DOMCTL_scheduler_op                  16
1084#define XEN_DOMCTL_setdomainhandle               17
1085#define XEN_DOMCTL_setdebugging                  18
1086#define XEN_DOMCTL_irq_permission                19
1087#define XEN_DOMCTL_iomem_permission              20
1088#define XEN_DOMCTL_ioport_permission             21
1089#define XEN_DOMCTL_hypercall_init                22
1090#define XEN_DOMCTL_arch_setup                    23 /* Obsolete IA64 only */
1091#define XEN_DOMCTL_settimeoffset                 24
1092#define XEN_DOMCTL_getvcpuaffinity               25
1093#define XEN_DOMCTL_real_mode_area                26 /* Obsolete PPC only */
1094#define XEN_DOMCTL_resumedomain                  27
1095#define XEN_DOMCTL_sendtrigger                   28
1096#define XEN_DOMCTL_subscribe                     29
1097#define XEN_DOMCTL_gethvmcontext                 33
1098#define XEN_DOMCTL_sethvmcontext                 34
1099#define XEN_DOMCTL_set_address_size              35
1100#define XEN_DOMCTL_get_address_size              36
1101#define XEN_DOMCTL_assign_device                 37
1102#define XEN_DOMCTL_bind_pt_irq                   38
1103#define XEN_DOMCTL_memory_mapping                39
1104#define XEN_DOMCTL_ioport_mapping                40
1105#define XEN_DOMCTL_pin_mem_cacheattr             41
1106#define XEN_DOMCTL_set_ext_vcpucontext           42
1107#define XEN_DOMCTL_get_ext_vcpucontext           43
1108#define XEN_DOMCTL_set_opt_feature               44 /* Obsolete IA64 only */
1109#define XEN_DOMCTL_test_assign_device            45
1110#define XEN_DOMCTL_set_target                    46
1111#define XEN_DOMCTL_deassign_device               47
1112#define XEN_DOMCTL_unbind_pt_irq                 48
1113#define XEN_DOMCTL_set_cpuid                     49
1114#define XEN_DOMCTL_get_device_group              50
1115#define XEN_DOMCTL_set_machine_address_size      51
1116#define XEN_DOMCTL_get_machine_address_size      52
1117#define XEN_DOMCTL_suppress_spurious_page_faults 53
1118#define XEN_DOMCTL_debug_op                      54
1119#define XEN_DOMCTL_gethvmcontext_partial         55
1120#define XEN_DOMCTL_vm_event_op                   56
1121#define XEN_DOMCTL_mem_sharing_op                57
1122#define XEN_DOMCTL_disable_migrate               58
1123#define XEN_DOMCTL_gettscinfo                    59
1124#define XEN_DOMCTL_settscinfo                    60
1125#define XEN_DOMCTL_getpageframeinfo3             61
1126#define XEN_DOMCTL_setvcpuextstate               62
1127#define XEN_DOMCTL_getvcpuextstate               63
1128#define XEN_DOMCTL_set_access_required           64
1129#define XEN_DOMCTL_audit_p2m                     65
1130#define XEN_DOMCTL_set_virq_handler              66
1131#define XEN_DOMCTL_set_broken_page_p2m           67
1132#define XEN_DOMCTL_setnodeaffinity               68
1133#define XEN_DOMCTL_getnodeaffinity               69
1134#define XEN_DOMCTL_set_max_evtchn                70
1135#define XEN_DOMCTL_cacheflush                    71
1136#define XEN_DOMCTL_get_vcpu_msrs                 72
1137#define XEN_DOMCTL_set_vcpu_msrs                 73
1138#define XEN_DOMCTL_setvnumainfo                  74
1139#define XEN_DOMCTL_psr_cmt_op                    75
1140#define XEN_DOMCTL_monitor_op                    77
1141#define XEN_DOMCTL_psr_cat_op                    78
1142#define XEN_DOMCTL_gdbsx_guestmemio            1000
1143#define XEN_DOMCTL_gdbsx_pausevcpu             1001
1144#define XEN_DOMCTL_gdbsx_unpausevcpu           1002
1145#define XEN_DOMCTL_gdbsx_domstatus             1003
1146    uint32_t interface_version; /* XEN_DOMCTL_INTERFACE_VERSION */
1147    domid_t  domain;
1148    union {
1149        struct xen_domctl_createdomain      createdomain;
1150        struct xen_domctl_getdomaininfo     getdomaininfo;
1151        struct xen_domctl_getmemlist        getmemlist;
1152        struct xen_domctl_getpageframeinfo3 getpageframeinfo3;
1153        struct xen_domctl_nodeaffinity      nodeaffinity;
1154        struct xen_domctl_vcpuaffinity      vcpuaffinity;
1155        struct xen_domctl_shadow_op         shadow_op;
1156        struct xen_domctl_max_mem           max_mem;
1157        struct xen_domctl_vcpucontext       vcpucontext;
1158        struct xen_domctl_getvcpuinfo       getvcpuinfo;
1159        struct xen_domctl_max_vcpus         max_vcpus;
1160        struct xen_domctl_scheduler_op      scheduler_op;
1161        struct xen_domctl_setdomainhandle   setdomainhandle;
1162        struct xen_domctl_setdebugging      setdebugging;
1163        struct xen_domctl_irq_permission    irq_permission;
1164        struct xen_domctl_iomem_permission  iomem_permission;
1165        struct xen_domctl_ioport_permission ioport_permission;
1166        struct xen_domctl_hypercall_init    hypercall_init;
1167        struct xen_domctl_settimeoffset     settimeoffset;
1168        struct xen_domctl_disable_migrate   disable_migrate;
1169        struct xen_domctl_tsc_info          tsc_info;
1170        struct xen_domctl_hvmcontext        hvmcontext;
1171        struct xen_domctl_hvmcontext_partial hvmcontext_partial;
1172        struct xen_domctl_address_size      address_size;
1173        struct xen_domctl_sendtrigger       sendtrigger;
1174        struct xen_domctl_get_device_group  get_device_group;
1175        struct xen_domctl_assign_device     assign_device;
1176        struct xen_domctl_bind_pt_irq       bind_pt_irq;
1177        struct xen_domctl_memory_mapping    memory_mapping;
1178        struct xen_domctl_ioport_mapping    ioport_mapping;
1179        struct xen_domctl_pin_mem_cacheattr pin_mem_cacheattr;
1180        struct xen_domctl_ext_vcpucontext   ext_vcpucontext;
1181        struct xen_domctl_set_target        set_target;
1182        struct xen_domctl_subscribe         subscribe;
1183        struct xen_domctl_debug_op          debug_op;
1184        struct xen_domctl_vm_event_op       vm_event_op;
1185        struct xen_domctl_mem_sharing_op    mem_sharing_op;
1186#if defined(__i386__) || defined(__x86_64__)
1187        struct xen_domctl_cpuid             cpuid;
1188        struct xen_domctl_vcpuextstate      vcpuextstate;
1189        struct xen_domctl_vcpu_msrs         vcpu_msrs;
1190#endif
1191        struct xen_domctl_set_access_required access_required;
1192        struct xen_domctl_audit_p2m         audit_p2m;
1193        struct xen_domctl_set_virq_handler  set_virq_handler;
1194        struct xen_domctl_set_max_evtchn    set_max_evtchn;
1195        struct xen_domctl_gdbsx_memio       gdbsx_guest_memio;
1196        struct xen_domctl_set_broken_page_p2m set_broken_page_p2m;
1197        struct xen_domctl_cacheflush        cacheflush;
1198        struct xen_domctl_gdbsx_pauseunp_vcpu gdbsx_pauseunp_vcpu;
1199        struct xen_domctl_gdbsx_domstatus   gdbsx_domstatus;
1200        struct xen_domctl_vnuma             vnuma;
1201        struct xen_domctl_psr_cmt_op        psr_cmt_op;
1202        struct xen_domctl_monitor_op        monitor_op;
1203        struct xen_domctl_psr_cat_op        psr_cat_op;
1204        uint8_t                             pad[128];
1205    } u;
1206};
1207typedef struct xen_domctl xen_domctl_t;
1208DEFINE_XEN_GUEST_HANDLE(xen_domctl_t);
1209
1210#endif /* __XEN_PUBLIC_DOMCTL_H__ */
1211
1212/*
1213 * Local variables:
1214 * mode: C
1215 * c-file-style: "BSD"
1216 * c-basic-offset: 4
1217 * tab-width: 4
1218 * indent-tabs-mode: nil
1219 * End:
1220 */
1221