/******************************************************************************
 * sysctl.h
 *
 * System management operations. For use by node control stack.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to
 * deal in the Software without restriction, including without limitation the
 * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
 * sell copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 * DEALINGS IN THE SOFTWARE.
 *
 * Copyright (c) 2002-2006, K Fraser
 */

#ifndef __XEN_PUBLIC_SYSCTL_H__
#define __XEN_PUBLIC_SYSCTL_H__

#if !defined(__XEN__) && !defined(__XEN_TOOLS__)
#error "sysctl operations are intended for use by node control tools only"
#endif

#include "xen.h"
#include "domctl.h"
#include "physdev.h"
#include "tmem.h"

#define XEN_SYSCTL_INTERFACE_VERSION 0x0000000C

/*
 * Read console content from Xen buffer ring.
 */
/* XEN_SYSCTL_readconsole */
struct xen_sysctl_readconsole {
    /* IN: Non-zero -> clear after reading. */
    uint8_t clear;
    /* IN: Non-zero -> start index specified by @index field. */
    uint8_t incremental;
    uint8_t pad0, pad1;
    /*
     * IN:  Start index for consuming from ring buffer (if @incremental);
     * OUT: End index after consuming from ring buffer.
     */
    uint32_t index;
    /* IN: Virtual address to write console data. */
    XEN_GUEST_HANDLE_64(char) buffer;
    /* IN: Size of buffer; OUT: Bytes written to buffer. */
    uint32_t count;
};
typedef struct xen_sysctl_readconsole xen_sysctl_readconsole_t;
DEFINE_XEN_GUEST_HANDLE(xen_sysctl_readconsole_t);
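
/*
 * Illustrative sketch (not part of the interface): an incremental read of
 * the console ring.  do_sysctl() stands in for whatever hypercall wrapper
 * the toolstack uses (e.g. via libxc); bouncing the buffer into a guest
 * handle is toolstack-specific and glossed over here.
 *
 *     struct xen_sysctl op = { 0 };
 *     static uint32_t idx;                    (persists across calls)
 *     char buf[4096];
 *
 *     op.cmd = XEN_SYSCTL_readconsole;
 *     op.interface_version = XEN_SYSCTL_INTERFACE_VERSION;
 *     op.u.readconsole.clear = 0;             (leave ring contents in place)
 *     op.u.readconsole.incremental = 1;       (resume from @index)
 *     op.u.readconsole.index = idx;
 *     op.u.readconsole.count = sizeof(buf);
 *     set_xen_guest_handle(op.u.readconsole.buffer, buf);
 *     if ( do_sysctl(&op) == 0 )
 *     {
 *         (op.u.readconsole.count bytes of console data are now in buf)
 *         idx = op.u.readconsole.index;       (continue from here next time)
 *     }
 */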

/* Get trace buffers machine base address */
/* XEN_SYSCTL_tbuf_op */
struct xen_sysctl_tbuf_op {
    /* IN variables */
#define XEN_SYSCTL_TBUFOP_get_info     0
#define XEN_SYSCTL_TBUFOP_set_cpu_mask 1
#define XEN_SYSCTL_TBUFOP_set_evt_mask 2
#define XEN_SYSCTL_TBUFOP_set_size     3
#define XEN_SYSCTL_TBUFOP_enable       4
#define XEN_SYSCTL_TBUFOP_disable      5
    uint32_t cmd;
    /* IN/OUT variables */
    struct xenctl_bitmap cpu_mask;
    uint32_t             evt_mask;
    /* OUT variables */
    uint64_aligned_t buffer_mfn;
    uint32_t size;  /* Also an IN variable! */
};
typedef struct xen_sysctl_tbuf_op xen_sysctl_tbuf_op_t;
DEFINE_XEN_GUEST_HANDLE(xen_sysctl_tbuf_op_t);

/*
 * Get physical information about the host machine
 */
/* XEN_SYSCTL_physinfo */
 /* (x86) The platform supports HVM guests. */
#define _XEN_SYSCTL_PHYSCAP_hvm          0
#define XEN_SYSCTL_PHYSCAP_hvm           (1u<<_XEN_SYSCTL_PHYSCAP_hvm)
 /* (x86) The platform supports HVM-guest direct access to I/O devices. */
#define _XEN_SYSCTL_PHYSCAP_hvm_directio 1
#define XEN_SYSCTL_PHYSCAP_hvm_directio  (1u<<_XEN_SYSCTL_PHYSCAP_hvm_directio)
struct xen_sysctl_physinfo {
    uint32_t threads_per_core;
    uint32_t cores_per_socket;
    uint32_t nr_cpus;     /* # CPUs currently online */
    uint32_t max_cpu_id;  /* Largest possible CPU ID on this host */
    uint32_t nr_nodes;    /* # nodes currently online */
    uint32_t max_node_id; /* Largest possible node ID on this host */
    uint32_t cpu_khz;
    uint64_aligned_t total_pages;
    uint64_aligned_t free_pages;
    uint64_aligned_t scrub_pages;
    uint64_aligned_t outstanding_pages;
    uint32_t hw_cap[8];

    /* XEN_SYSCTL_PHYSCAP_??? */
    uint32_t capabilities;
};
typedef struct xen_sysctl_physinfo xen_sysctl_physinfo_t;
DEFINE_XEN_GUEST_HANDLE(xen_sysctl_physinfo_t);
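
/*
 * Illustrative sketch (not part of the interface): fetching physinfo and
 * testing a capability bit.  do_sysctl() is again a hypothetical hypercall
 * wrapper standing in for the toolstack's own plumbing.
 *
 *     struct xen_sysctl op = { 0 };
 *
 *     op.cmd = XEN_SYSCTL_physinfo;
 *     op.interface_version = XEN_SYSCTL_INTERFACE_VERSION;
 *     if ( do_sysctl(&op) == 0 &&
 *          (op.u.physinfo.capabilities & XEN_SYSCTL_PHYSCAP_hvm) )
 *     {
 *         (the host can run HVM guests; op.u.physinfo.nr_cpus, cpu_khz,
 *          total_pages etc. describe the platform)
 *     }
 */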

/*
 * Get the ID of the current scheduler.
 */
/* XEN_SYSCTL_sched_id */
struct xen_sysctl_sched_id {
    /* OUT variable */
    uint32_t sched_id;
};
typedef struct xen_sysctl_sched_id xen_sysctl_sched_id_t;
DEFINE_XEN_GUEST_HANDLE(xen_sysctl_sched_id_t);

/* Interface for controlling Xen software performance counters. */
/* XEN_SYSCTL_perfc_op */
/* Sub-operations: */
#define XEN_SYSCTL_PERFCOP_reset 1   /* Reset all counters to zero. */
#define XEN_SYSCTL_PERFCOP_query 2   /* Get perfctr information. */
struct xen_sysctl_perfc_desc {
    char         name[80];             /* name of perf counter */
    uint32_t     nr_vals;              /* number of values for this counter */
};
typedef struct xen_sysctl_perfc_desc xen_sysctl_perfc_desc_t;
DEFINE_XEN_GUEST_HANDLE(xen_sysctl_perfc_desc_t);
typedef uint32_t xen_sysctl_perfc_val_t;
DEFINE_XEN_GUEST_HANDLE(xen_sysctl_perfc_val_t);

struct xen_sysctl_perfc_op {
    /* IN variables. */
    uint32_t       cmd;                /* XEN_SYSCTL_PERFCOP_??? */
    /* OUT variables. */
    uint32_t       nr_counters;        /* number of counter descriptions */
    uint32_t       nr_vals;            /* number of values */
    /* counter information (or NULL) */
    XEN_GUEST_HANDLE_64(xen_sysctl_perfc_desc_t) desc;
    /* counter values (or NULL) */
    XEN_GUEST_HANDLE_64(xen_sysctl_perfc_val_t) val;
};
typedef struct xen_sysctl_perfc_op xen_sysctl_perfc_op_t;
DEFINE_XEN_GUEST_HANDLE(xen_sysctl_perfc_op_t);
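
/*
 * Illustrative sketch (not part of the interface): the usual two-pass query
 * pattern.  A first call with NULL handles returns only the counts; the
 * caller then allocates the arrays and queries again.  do_sysctl() is a
 * hypothetical wrapper.
 *
 *     struct xen_sysctl op = { 0 };
 *     xen_sysctl_perfc_desc_t *desc;
 *     xen_sysctl_perfc_val_t *val;
 *
 *     op.cmd = XEN_SYSCTL_perfc_op;
 *     op.interface_version = XEN_SYSCTL_INTERFACE_VERSION;
 *     op.u.perfc_op.cmd = XEN_SYSCTL_PERFCOP_query;
 *     set_xen_guest_handle(op.u.perfc_op.desc, NULL);
 *     set_xen_guest_handle(op.u.perfc_op.val, NULL);
 *     do_sysctl(&op);                         (first pass: sizes only)
 *
 *     desc = calloc(op.u.perfc_op.nr_counters, sizeof(*desc));
 *     val  = calloc(op.u.perfc_op.nr_vals, sizeof(*val));
 *     set_xen_guest_handle(op.u.perfc_op.desc, desc);
 *     set_xen_guest_handle(op.u.perfc_op.val, val);
 *     do_sysctl(&op);                         (second pass: fill the arrays)
 */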

/* XEN_SYSCTL_getdomaininfolist */
struct xen_sysctl_getdomaininfolist {
    /* IN variables. */
    domid_t               first_domain;
    uint32_t              max_domains;
    XEN_GUEST_HANDLE_64(xen_domctl_getdomaininfo_t) buffer;
    /* OUT variables. */
    uint32_t              num_domains;
};
typedef struct xen_sysctl_getdomaininfolist xen_sysctl_getdomaininfolist_t;
DEFINE_XEN_GUEST_HANDLE(xen_sysctl_getdomaininfolist_t);
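
/*
 * Illustrative sketch (not part of the interface): enumerating domains in
 * batches.  'num_domains' reports how many entries were written; the next
 * batch is assumed to start just past the last domain returned.  do_sysctl()
 * is a hypothetical wrapper.
 *
 *     xen_domctl_getdomaininfo_t info[64];
 *     struct xen_sysctl op = { 0 };
 *     domid_t next = 0;
 *     uint32_t i, n;
 *
 *     op.cmd = XEN_SYSCTL_getdomaininfolist;
 *     op.interface_version = XEN_SYSCTL_INTERFACE_VERSION;
 *     do {
 *         op.u.getdomaininfolist.first_domain = next;
 *         op.u.getdomaininfolist.max_domains  = 64;
 *         set_xen_guest_handle(op.u.getdomaininfolist.buffer, info);
 *         if ( do_sysctl(&op) )
 *             break;
 *         n = op.u.getdomaininfolist.num_domains;
 *         for ( i = 0; i < n; i++ )
 *             (info[i].domain describes one domain)
 *         if ( n )
 *             next = info[n - 1].domain + 1;
 *     } while ( n == 64 );
 */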

/* Inject debug keys into Xen. */
/* XEN_SYSCTL_debug_keys */
struct xen_sysctl_debug_keys {
    /* IN variables. */
    XEN_GUEST_HANDLE_64(char) keys;
    uint32_t nr_keys;
};
typedef struct xen_sysctl_debug_keys xen_sysctl_debug_keys_t;
DEFINE_XEN_GUEST_HANDLE(xen_sysctl_debug_keys_t);

/* Get physical CPU information. */
/* XEN_SYSCTL_getcpuinfo */
struct xen_sysctl_cpuinfo {
    uint64_aligned_t idletime;
};
typedef struct xen_sysctl_cpuinfo xen_sysctl_cpuinfo_t;
DEFINE_XEN_GUEST_HANDLE(xen_sysctl_cpuinfo_t);
struct xen_sysctl_getcpuinfo {
    /* IN variables. */
    uint32_t max_cpus;
    XEN_GUEST_HANDLE_64(xen_sysctl_cpuinfo_t) info;
    /* OUT variables. */
    uint32_t nr_cpus;
};
typedef struct xen_sysctl_getcpuinfo xen_sysctl_getcpuinfo_t;
DEFINE_XEN_GUEST_HANDLE(xen_sysctl_getcpuinfo_t);

/* XEN_SYSCTL_availheap */
struct xen_sysctl_availheap {
    /* IN variables. */
    uint32_t min_bitwidth;  /* Smallest address width (zero if don't care). */
    uint32_t max_bitwidth;  /* Largest address width (zero if don't care). */
    int32_t  node;          /* NUMA node of interest (-1 for all nodes). */
    /* OUT variables. */
    uint64_aligned_t avail_bytes;/* Bytes available in the specified region. */
};
typedef struct xen_sysctl_availheap xen_sysctl_availheap_t;
DEFINE_XEN_GUEST_HANDLE(xen_sysctl_availheap_t);

/* XEN_SYSCTL_get_pmstat */
struct pm_px_val {
    uint64_aligned_t freq;        /* Px core frequency */
    uint64_aligned_t residency;   /* Px residency time */
    uint64_aligned_t count;       /* Px transition count */
};
typedef struct pm_px_val pm_px_val_t;
DEFINE_XEN_GUEST_HANDLE(pm_px_val_t);

struct pm_px_stat {
    uint8_t total;        /* total Px states */
    uint8_t usable;       /* usable Px states */
    uint8_t last;         /* last Px state */
    uint8_t cur;          /* current Px state */
    XEN_GUEST_HANDLE_64(uint64) trans_pt;   /* Px transition table */
    XEN_GUEST_HANDLE_64(pm_px_val_t) pt;
};
typedef struct pm_px_stat pm_px_stat_t;
DEFINE_XEN_GUEST_HANDLE(pm_px_stat_t);

struct pm_cx_stat {
    uint32_t nr;    /* entry nr in triggers & residencies, including C0 */
    uint32_t last;  /* last Cx state */
    uint64_aligned_t idle_time;                 /* idle time from boot */
    XEN_GUEST_HANDLE_64(uint64) triggers;    /* Cx trigger counts */
    XEN_GUEST_HANDLE_64(uint64) residencies; /* Cx residencies */
    uint32_t nr_pc;                          /* entry nr in pc[] */
    uint32_t nr_cc;                          /* entry nr in cc[] */
    /*
     * These two arrays may (and generally will) have unused slots; slots not
     * having a corresponding hardware register will not be written by the
     * hypervisor. It is therefore up to the caller to put a suitable sentinel
     * into all slots before invoking the function.
     * Indexing is 1-biased (PC1/CC1 being at index 0).
     */
    XEN_GUEST_HANDLE_64(uint64) pc;
    XEN_GUEST_HANDLE_64(uint64) cc;
};

struct xen_sysctl_get_pmstat {
#define PMSTAT_CATEGORY_MASK 0xf0
#define PMSTAT_PX            0x10
#define PMSTAT_CX            0x20
#define PMSTAT_get_max_px    (PMSTAT_PX | 0x1)
#define PMSTAT_get_pxstat    (PMSTAT_PX | 0x2)
#define PMSTAT_reset_pxstat  (PMSTAT_PX | 0x3)
#define PMSTAT_get_max_cx    (PMSTAT_CX | 0x1)
#define PMSTAT_get_cxstat    (PMSTAT_CX | 0x2)
#define PMSTAT_reset_cxstat  (PMSTAT_CX | 0x3)
    uint32_t type;
    uint32_t cpuid;
    union {
        struct pm_px_stat getpx;
        struct pm_cx_stat getcx;
        /* other struct for tx, etc */
    } u;
};
typedef struct xen_sysctl_get_pmstat xen_sysctl_get_pmstat_t;
DEFINE_XEN_GUEST_HANDLE(xen_sysctl_get_pmstat_t);
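
/*
 * Illustrative sketch (not part of the interface): pre-filling the pc[]/cc[]
 * arrays with a sentinel before a PMSTAT_get_cxstat request, as required by
 * the comment in struct pm_cx_stat.  The sentinel value, the array size of 8
 * and do_sysctl() are the caller's choices / hypothetical.
 *
 *     uint64_t pc[8], cc[8];
 *     unsigned int i;
 *     struct xen_sysctl op = { 0 };
 *
 *     for ( i = 0; i < 8; i++ )
 *         pc[i] = cc[i] = UINT64_MAX;        (sentinel: "not written by Xen")
 *
 *     op.cmd = XEN_SYSCTL_get_pmstat;
 *     op.interface_version = XEN_SYSCTL_INTERFACE_VERSION;
 *     op.u.get_pmstat.type = PMSTAT_get_cxstat;
 *     op.u.get_pmstat.cpuid = 0;
 *     op.u.get_pmstat.u.getcx.nr_pc = 8;
 *     op.u.get_pmstat.u.getcx.nr_cc = 8;
 *     set_xen_guest_handle(op.u.get_pmstat.u.getcx.pc, pc);
 *     set_xen_guest_handle(op.u.get_pmstat.u.getcx.cc, cc);
 *     (triggers/residencies handles set up similarly, then do_sysctl(&op);
 *      afterwards pc[0] is PC1, pc[1] is PC2, ..., still UINT64_MAX where no
 *      corresponding hardware register exists)
 */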

/* XEN_SYSCTL_cpu_hotplug */
struct xen_sysctl_cpu_hotplug {
    /* IN variables */
    uint32_t cpu;   /* Physical cpu. */
#define XEN_SYSCTL_CPU_HOTPLUG_ONLINE  0
#define XEN_SYSCTL_CPU_HOTPLUG_OFFLINE 1
    uint32_t op;    /* hotplug opcode */
};
typedef struct xen_sysctl_cpu_hotplug xen_sysctl_cpu_hotplug_t;
DEFINE_XEN_GUEST_HANDLE(xen_sysctl_cpu_hotplug_t);

/*
 * Get/set Xen power management parameters, including:
 * 1. cpufreq governors and related parameters
 */
/* XEN_SYSCTL_pm_op */
struct xen_userspace {
    uint32_t scaling_setspeed;
};
typedef struct xen_userspace xen_userspace_t;

struct xen_ondemand {
    uint32_t sampling_rate_max;
    uint32_t sampling_rate_min;

    uint32_t sampling_rate;
    uint32_t up_threshold;
};
typedef struct xen_ondemand xen_ondemand_t;

/*
 * The cpufreq parameter names in this structure match the corresponding
 * sysfs file names of native Linux.
 */
#define CPUFREQ_NAME_LEN 16
struct xen_get_cpufreq_para {
    /* IN/OUT variable */
    uint32_t cpu_num;
    uint32_t freq_num;
    uint32_t gov_num;

    /* for all governors */
    /* OUT variable */
    XEN_GUEST_HANDLE_64(uint32) affected_cpus;
    XEN_GUEST_HANDLE_64(uint32) scaling_available_frequencies;
    XEN_GUEST_HANDLE_64(char)   scaling_available_governors;
    char scaling_driver[CPUFREQ_NAME_LEN];

    uint32_t cpuinfo_cur_freq;
    uint32_t cpuinfo_max_freq;
    uint32_t cpuinfo_min_freq;
    uint32_t scaling_cur_freq;

    char scaling_governor[CPUFREQ_NAME_LEN];
    uint32_t scaling_max_freq;
    uint32_t scaling_min_freq;

    /* for specific governor */
    union {
        struct  xen_userspace userspace;
        struct  xen_ondemand ondemand;
    } u;

    int32_t turbo_enabled;
};

struct xen_set_cpufreq_gov {
    char scaling_governor[CPUFREQ_NAME_LEN];
};

struct xen_set_cpufreq_para {
    #define SCALING_MAX_FREQ           1
    #define SCALING_MIN_FREQ           2
    #define SCALING_SETSPEED           3
    #define SAMPLING_RATE              4
    #define UP_THRESHOLD               5

    uint32_t ctrl_type;
    uint32_t ctrl_value;
};

struct xen_sysctl_pm_op {
    #define PM_PARA_CATEGORY_MASK      0xf0
    #define CPUFREQ_PARA               0x10

    /* cpufreq command type */
    #define GET_CPUFREQ_PARA           (CPUFREQ_PARA | 0x01)
    #define SET_CPUFREQ_GOV            (CPUFREQ_PARA | 0x02)
    #define SET_CPUFREQ_PARA           (CPUFREQ_PARA | 0x03)
    #define GET_CPUFREQ_AVGFREQ        (CPUFREQ_PARA | 0x04)

    /* set/reset scheduler power saving option */
    #define XEN_SYSCTL_pm_op_set_sched_opt_smt    0x21

    /* cpuidle max_cstate access command */
    #define XEN_SYSCTL_pm_op_get_max_cstate       0x22
    #define XEN_SYSCTL_pm_op_set_max_cstate       0x23

    /* set scheduler migration cost value */
    #define XEN_SYSCTL_pm_op_set_vcpu_migration_delay   0x24
    #define XEN_SYSCTL_pm_op_get_vcpu_migration_delay   0x25

    /* enable/disable turbo mode when in dbs governor */
    #define XEN_SYSCTL_pm_op_enable_turbo               0x26
    #define XEN_SYSCTL_pm_op_disable_turbo              0x27

    uint32_t cmd;
    uint32_t cpuid;
    union {
        struct xen_get_cpufreq_para get_para;
        struct xen_set_cpufreq_gov  set_gov;
        struct xen_set_cpufreq_para set_para;
        uint64_aligned_t get_avgfreq;
        uint32_t                    set_sched_opt_smt;
        uint32_t                    get_max_cstate;
        uint32_t                    set_max_cstate;
        uint32_t                    get_vcpu_migration_delay;
        uint32_t                    set_vcpu_migration_delay;
    } u;
};

/* XEN_SYSCTL_page_offline_op */
struct xen_sysctl_page_offline_op {
    /* IN: range of page to be offlined */
#define sysctl_page_offline     1
#define sysctl_page_online      2
#define sysctl_query_page_offline  3
    uint32_t cmd;
    uint32_t start;
    uint32_t end;
    /* OUT: result of page offline request */
    /*
     * bit 0~15: result flags
     * bit 16~31: owner
     */
    XEN_GUEST_HANDLE(uint32) status;
};

#define PG_OFFLINE_STATUS_MASK    (0xFFUL)

/* The result is invalid, i.e. HV does not handle it */
#define PG_OFFLINE_INVALID   (0x1UL << 0)

#define PG_OFFLINE_OFFLINED  (0x1UL << 1)
#define PG_OFFLINE_PENDING   (0x1UL << 2)
#define PG_OFFLINE_FAILED    (0x1UL << 3)
#define PG_OFFLINE_AGAIN     (0x1UL << 4)

#define PG_ONLINE_FAILED     PG_OFFLINE_FAILED
#define PG_ONLINE_ONLINED    PG_OFFLINE_OFFLINED

#define PG_OFFLINE_STATUS_OFFLINED              (0x1UL << 1)
#define PG_OFFLINE_STATUS_ONLINE                (0x1UL << 2)
#define PG_OFFLINE_STATUS_OFFLINE_PENDING       (0x1UL << 3)
#define PG_OFFLINE_STATUS_BROKEN                (0x1UL << 4)

#define PG_OFFLINE_MISC_MASK    (0xFFUL << 4)

/* valid when PG_OFFLINE_FAILED or PG_OFFLINE_PENDING */
#define PG_OFFLINE_XENPAGE   (0x1UL << 8)
#define PG_OFFLINE_DOM0PAGE  (0x1UL << 9)
#define PG_OFFLINE_ANONYMOUS (0x1UL << 10)
#define PG_OFFLINE_NOT_CONV_RAM   (0x1UL << 11)
#define PG_OFFLINE_OWNED     (0x1UL << 12)

#define PG_OFFLINE_BROKEN    (0x1UL << 13)
#define PG_ONLINE_BROKEN     PG_OFFLINE_BROKEN

#define PG_OFFLINE_OWNER_SHIFT 16
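
/*
 * Illustrative sketch (not part of the interface): decoding one entry of the
 * 'status' array after a sysctl_page_offline request.  NR_PAGES, the index
 * 'i', do_sysctl() and the surrounding setup are hypothetical.
 *
 *     uint32_t status[NR_PAGES];              (one word per page in [start, end])
 *     uint32_t s = status[i];
 *     uint32_t owner = s >> PG_OFFLINE_OWNER_SHIFT;  (domid, if PG_OFFLINE_OWNED)
 *
 *     if ( s & PG_OFFLINE_OFFLINED )
 *         (the page is now offline)
 *     else if ( s & PG_OFFLINE_PENDING )
 *         (still in use; PG_OFFLINE_XENPAGE / PG_OFFLINE_DOM0PAGE /
 *          PG_OFFLINE_ANONYMOUS / PG_OFFLINE_OWNED qualify who holds it)
 *     else if ( s & PG_OFFLINE_FAILED )
 *         (offlining failed, e.g. PG_OFFLINE_NOT_CONV_RAM)
 */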

/* XEN_SYSCTL_lockprof_op */
/* Sub-operations: */
#define XEN_SYSCTL_LOCKPROF_reset 1   /* Reset all profile data to zero. */
#define XEN_SYSCTL_LOCKPROF_query 2   /* Get lock profile information. */
/* Record-type: */
#define LOCKPROF_TYPE_GLOBAL      0   /* global lock, idx meaningless */
#define LOCKPROF_TYPE_PERDOM      1   /* per-domain lock, idx is domid */
#define LOCKPROF_TYPE_N           2   /* number of types */
struct xen_sysctl_lockprof_data {
    char     name[40];     /* lock name (may include up to 2 %d specifiers) */
    int32_t  type;         /* LOCKPROF_TYPE_??? */
    int32_t  idx;          /* index (e.g. domain id) */
    uint64_aligned_t lock_cnt;     /* # of locking succeeded */
    uint64_aligned_t block_cnt;    /* # of wait for lock */
    uint64_aligned_t lock_time;    /* nsecs lock held */
    uint64_aligned_t block_time;   /* nsecs waited for lock */
};
typedef struct xen_sysctl_lockprof_data xen_sysctl_lockprof_data_t;
DEFINE_XEN_GUEST_HANDLE(xen_sysctl_lockprof_data_t);
struct xen_sysctl_lockprof_op {
    /* IN variables. */
    uint32_t       cmd;               /* XEN_SYSCTL_LOCKPROF_??? */
    uint32_t       max_elem;          /* size of output buffer */
    /* OUT variables (query only). */
    uint32_t       nr_elem;           /* number of elements available */
    uint64_aligned_t time;            /* nsecs of profile measurement */
    /* profile information (or NULL) */
    XEN_GUEST_HANDLE_64(xen_sysctl_lockprof_data_t) data;
};
typedef struct xen_sysctl_lockprof_op xen_sysctl_lockprof_op_t;
DEFINE_XEN_GUEST_HANDLE(xen_sysctl_lockprof_op_t);

/* XEN_SYSCTL_cputopoinfo */
#define XEN_INVALID_CORE_ID     (~0U)
#define XEN_INVALID_SOCKET_ID   (~0U)
#define XEN_INVALID_NODE_ID     (~0U)

struct xen_sysctl_cputopo {
    uint32_t core;
    uint32_t socket;
    uint32_t node;
};
typedef struct xen_sysctl_cputopo xen_sysctl_cputopo_t;
DEFINE_XEN_GUEST_HANDLE(xen_sysctl_cputopo_t);

/*
 * IN:
 *  - a NULL 'cputopo' handle is a request for maximum 'num_cpus'.
 *  - otherwise it's the number of entries in 'cputopo'
 *
 * OUT:
 *  - If 'num_cpus' is less than the number Xen wants to write but the
 *    handle is not a NULL one, partial data gets returned and 'num_cpus'
 *    gets updated to reflect the intended number.
 *  - Otherwise, 'num_cpus' shall indicate the number of entries written, which
 *    may be less than the input value.
 */
struct xen_sysctl_cputopoinfo {
    uint32_t num_cpus;
    XEN_GUEST_HANDLE_64(xen_sysctl_cputopo_t) cputopo;
};
typedef struct xen_sysctl_cputopoinfo xen_sysctl_cputopoinfo_t;
DEFINE_XEN_GUEST_HANDLE(xen_sysctl_cputopoinfo_t);
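
/*
 * Illustrative sketch (not part of the interface): the two-call pattern
 * described above.  do_sysctl() is a hypothetical hypercall wrapper.
 *
 *     struct xen_sysctl op = { 0 };
 *     xen_sysctl_cputopo_t *topo;
 *     uint32_t max, i;
 *
 *     op.cmd = XEN_SYSCTL_cputopoinfo;
 *     op.interface_version = XEN_SYSCTL_INTERFACE_VERSION;
 *     set_xen_guest_handle(op.u.cputopoinfo.cputopo, NULL);
 *     do_sysctl(&op);                    (NULL handle: ask for the maximum)
 *     max = op.u.cputopoinfo.num_cpus;
 *
 *     topo = calloc(max, sizeof(*topo));
 *     op.u.cputopoinfo.num_cpus = max;
 *     set_xen_guest_handle(op.u.cputopoinfo.cputopo, topo);
 *     do_sysctl(&op);
 *     for ( i = 0; i < op.u.cputopoinfo.num_cpus; i++ )
 *         (topo[i].core/socket/node; XEN_INVALID_*_ID where unknown)
 */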

/* XEN_SYSCTL_numainfo */
#define XEN_INVALID_MEM_SZ     (~0U)
#define XEN_INVALID_NODE_DIST  (~0U)

struct xen_sysctl_meminfo {
    uint64_t memsize;
    uint64_t memfree;
};
typedef struct xen_sysctl_meminfo xen_sysctl_meminfo_t;
DEFINE_XEN_GUEST_HANDLE(xen_sysctl_meminfo_t);

/*
 * IN:
 *  - Both 'meminfo' and 'distance' handles being null is a request
 *    for the maximum value of 'num_nodes'.
 *  - Otherwise it's the number of entries in 'meminfo' and the square root
 *    of the number of entries in 'distance' (when the corresponding handle
 *    is non-null).
 *
 * OUT:
 *  - If 'num_nodes' is less than the number Xen wants to write but either
 *    handle is not a NULL one, partial data gets returned and 'num_nodes'
 *    gets updated to reflect the intended number.
 *  - Otherwise, 'num_nodes' shall indicate the number of entries written, which
 *    may be less than the input value.
 */

struct xen_sysctl_numainfo {
    uint32_t num_nodes;

    XEN_GUEST_HANDLE_64(xen_sysctl_meminfo_t) meminfo;

    /*
     * Distance between nodes 'i' and 'j' is stored in index 'i*N + j',
     * where N is the number of nodes that will be returned in 'num_nodes'
     * (i.e. not 'num_nodes' provided by the caller)
     */
    XEN_GUEST_HANDLE_64(uint32) distance;
};
typedef struct xen_sysctl_numainfo xen_sysctl_numainfo_t;
DEFINE_XEN_GUEST_HANDLE(xen_sysctl_numainfo_t);
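
/*
 * Illustrative sketch (not part of the interface): reading the node distance
 * matrix.  'op' is assumed to be set up as in the sketches above and 'nr' to
 * come from a first call with NULL handles (which yields the node count, as
 * for XEN_SYSCTL_cputopoinfo); do_sysctl() is hypothetical.
 *
 *     xen_sysctl_meminfo_t *mem = calloc(nr, sizeof(*mem));
 *     uint32_t *dist = calloc(nr * nr, sizeof(*dist));
 *     uint32_t i, j, N;
 *
 *     op.u.numainfo.num_nodes = nr;
 *     set_xen_guest_handle(op.u.numainfo.meminfo, mem);
 *     set_xen_guest_handle(op.u.numainfo.distance, dist);
 *     do_sysctl(&op);
 *
 *     N = op.u.numainfo.num_nodes;            (as returned by Xen)
 *     for ( i = 0; i < N; i++ )
 *         for ( j = 0; j < N; j++ )
 *             (distance i -> j is dist[i * N + j],
 *              XEN_INVALID_NODE_DIST if unknown)
 */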

/* XEN_SYSCTL_cpupool_op */
#define XEN_SYSCTL_CPUPOOL_OP_CREATE                1  /* C */
#define XEN_SYSCTL_CPUPOOL_OP_DESTROY               2  /* D */
#define XEN_SYSCTL_CPUPOOL_OP_INFO                  3  /* I */
#define XEN_SYSCTL_CPUPOOL_OP_ADDCPU                4  /* A */
#define XEN_SYSCTL_CPUPOOL_OP_RMCPU                 5  /* R */
#define XEN_SYSCTL_CPUPOOL_OP_MOVEDOMAIN            6  /* M */
#define XEN_SYSCTL_CPUPOOL_OP_FREEINFO              7  /* F */
#define XEN_SYSCTL_CPUPOOL_PAR_ANY     0xFFFFFFFF
struct xen_sysctl_cpupool_op {
    uint32_t op;          /* IN */
    uint32_t cpupool_id;  /* IN: CDIARM OUT: CI */
    uint32_t sched_id;    /* IN: C      OUT: I  */
    uint32_t domid;       /* IN: M              */
    uint32_t cpu;         /* IN: AR             */
    uint32_t n_dom;       /*            OUT: I  */
    struct xenctl_bitmap cpumap; /*     OUT: IF */
};
typedef struct xen_sysctl_cpupool_op xen_sysctl_cpupool_op_t;
DEFINE_XEN_GUEST_HANDLE(xen_sysctl_cpupool_op_t);

#define ARINC653_MAX_DOMAINS_PER_SCHEDULE   64
/*
 * This structure is used to pass a new ARINC653 schedule from a
 * privileged domain (i.e. dom0) to Xen.
 */
struct xen_sysctl_arinc653_schedule {
    /* major_frame holds the time for the new schedule's major frame
     * in nanoseconds. */
    uint64_aligned_t     major_frame;
    /* num_sched_entries holds how many of the entries in the
     * sched_entries[] array are valid. */
    uint8_t     num_sched_entries;
    /* The sched_entries array holds the actual schedule entries. */
    struct {
        /* dom_handle must match a domain's UUID */
        xen_domain_handle_t dom_handle;
        /* If a domain has multiple VCPUs, vcpu_id specifies which one
         * this schedule entry applies to. It should be set to 0 if
         * there is only one VCPU for the domain. */
        unsigned int vcpu_id;
        /* runtime specifies the amount of time that should be allocated
         * to this VCPU per major frame. It is specified in nanoseconds */
        uint64_aligned_t runtime;
    } sched_entries[ARINC653_MAX_DOMAINS_PER_SCHEDULE];
};
typedef struct xen_sysctl_arinc653_schedule xen_sysctl_arinc653_schedule_t;
DEFINE_XEN_GUEST_HANDLE(xen_sysctl_arinc653_schedule_t);
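
/*
 * Illustrative sketch (not part of the interface): building a two-entry
 * ARINC653 schedule.  The domain handles (uuid_a, uuid_b) and the chosen
 * frame lengths are made up; the filled structure would be passed via
 * XEN_SYSCTL_scheduler_op below.
 *
 *     xen_sysctl_arinc653_schedule_t sched;
 *
 *     memset(&sched, 0, sizeof(sched));
 *     sched.major_frame = 20000000ULL;                (20 ms major frame)
 *     sched.num_sched_entries = 2;
 *
 *     memcpy(sched.sched_entries[0].dom_handle, uuid_a,
 *            sizeof(xen_domain_handle_t));
 *     sched.sched_entries[0].vcpu_id = 0;
 *     sched.sched_entries[0].runtime = 15000000ULL;   (15 ms per frame)
 *
 *     memcpy(sched.sched_entries[1].dom_handle, uuid_b,
 *            sizeof(xen_domain_handle_t));
 *     sched.sched_entries[1].vcpu_id = 0;
 *     sched.sched_entries[1].runtime = 5000000ULL;    (5 ms per frame)
 */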

struct xen_sysctl_credit_schedule {
    /* Length of timeslice in milliseconds */
#define XEN_SYSCTL_CSCHED_TSLICE_MAX 1000
#define XEN_SYSCTL_CSCHED_TSLICE_MIN 1
    unsigned tslice_ms;
    /* Rate limit (minimum timeslice) in microseconds */
#define XEN_SYSCTL_SCHED_RATELIMIT_MAX 500000
#define XEN_SYSCTL_SCHED_RATELIMIT_MIN 100
    unsigned ratelimit_us;
};
typedef struct xen_sysctl_credit_schedule xen_sysctl_credit_schedule_t;
DEFINE_XEN_GUEST_HANDLE(xen_sysctl_credit_schedule_t);

/* XEN_SYSCTL_scheduler_op */
/* Set or get info? */
#define XEN_SYSCTL_SCHEDOP_putinfo 0
#define XEN_SYSCTL_SCHEDOP_getinfo 1
struct xen_sysctl_scheduler_op {
    uint32_t cpupool_id; /* Cpupool whose scheduler is to be targeted. */
    uint32_t sched_id;   /* XEN_SCHEDULER_* (domctl.h) */
    uint32_t cmd;        /* XEN_SYSCTL_SCHEDOP_* */
    union {
        struct xen_sysctl_sched_arinc653 {
            XEN_GUEST_HANDLE_64(xen_sysctl_arinc653_schedule_t) schedule;
        } sched_arinc653;
        struct xen_sysctl_credit_schedule sched_credit;
    } u;
};
typedef struct xen_sysctl_scheduler_op xen_sysctl_scheduler_op_t;
DEFINE_XEN_GUEST_HANDLE(xen_sysctl_scheduler_op_t);
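
/*
 * Illustrative sketch (not part of the interface): setting credit scheduler
 * parameters for a cpupool.  do_sysctl() is hypothetical; cpupool 0 and the
 * chosen values are merely examples within the documented MIN/MAX bounds.
 *
 *     struct xen_sysctl op = { 0 };
 *
 *     op.cmd = XEN_SYSCTL_scheduler_op;
 *     op.interface_version = XEN_SYSCTL_INTERFACE_VERSION;
 *     op.u.scheduler_op.cpupool_id = 0;
 *     op.u.scheduler_op.sched_id = XEN_SCHEDULER_CREDIT;   (from domctl.h)
 *     op.u.scheduler_op.cmd = XEN_SYSCTL_SCHEDOP_putinfo;
 *     op.u.scheduler_op.u.sched_credit.tslice_ms = 30;
 *     op.u.scheduler_op.u.sched_credit.ratelimit_us = 1000;
 *     do_sysctl(&op);
 */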

/* XEN_SYSCTL_coverage_op */
/*
 * Get total size of information, to help allocate
 * the buffer. The pointer points to a 32 bit value.
 */
#define XEN_SYSCTL_COVERAGE_get_total_size 0

/*
 * Read coverage information in a single run.
 * You must use a tool to split the returned data.
 */
#define XEN_SYSCTL_COVERAGE_read           1

/*
 * Reset all the coverage counters to 0.
 * No parameters.
 */
#define XEN_SYSCTL_COVERAGE_reset          2

/*
 * Like XEN_SYSCTL_COVERAGE_read but also resets
 * the counters to 0 in a single call.
 */
#define XEN_SYSCTL_COVERAGE_read_and_reset 3

struct xen_sysctl_coverage_op {
    uint32_t cmd;        /* XEN_SYSCTL_COVERAGE_* */
    union {
        uint32_t total_size; /* OUT */
        XEN_GUEST_HANDLE_64(uint8)  raw_info;   /* OUT */
    } u;
};
typedef struct xen_sysctl_coverage_op xen_sysctl_coverage_op_t;
DEFINE_XEN_GUEST_HANDLE(xen_sysctl_coverage_op_t);

#define XEN_SYSCTL_PSR_CMT_get_total_rmid            0
#define XEN_SYSCTL_PSR_CMT_get_l3_upscaling_factor   1
/* The L3 cache size is returned in KB units */
#define XEN_SYSCTL_PSR_CMT_get_l3_cache_size         2
#define XEN_SYSCTL_PSR_CMT_enabled                   3
#define XEN_SYSCTL_PSR_CMT_get_l3_event_mask         4
struct xen_sysctl_psr_cmt_op {
    uint32_t cmd;       /* IN: XEN_SYSCTL_PSR_CMT_* */
    uint32_t flags;     /* padding variable, may be extended for future use */
    union {
        uint64_t data;  /* OUT */
        struct {
            uint32_t cpu;   /* IN */
            uint32_t rsvd;
        } l3_cache;
    } u;
};
typedef struct xen_sysctl_psr_cmt_op xen_sysctl_psr_cmt_op_t;
DEFINE_XEN_GUEST_HANDLE(xen_sysctl_psr_cmt_op_t);

/* XEN_SYSCTL_pcitopoinfo */
#define XEN_INVALID_DEV (XEN_INVALID_NODE_ID - 1)
struct xen_sysctl_pcitopoinfo {
    /*
     * IN: Number of elements in 'devs' and 'nodes' arrays.
     * OUT: Number of processed elements of those arrays.
     */
    uint32_t num_devs;

    /* IN: list of devices for which node IDs are requested. */
    XEN_GUEST_HANDLE_64(physdev_pci_device_t) devs;

    /*
     * OUT: node identifier for each device.
     * If information for a particular device is not available then the
     * corresponding entry will be set to XEN_INVALID_NODE_ID. If the
     * device is not known to the hypervisor then XEN_INVALID_DEV
     * will be provided.
     */
    XEN_GUEST_HANDLE_64(uint32) nodes;
};
typedef struct xen_sysctl_pcitopoinfo xen_sysctl_pcitopoinfo_t;
DEFINE_XEN_GUEST_HANDLE(xen_sysctl_pcitopoinfo_t);

#define XEN_SYSCTL_PSR_CAT_get_l3_info               0
struct xen_sysctl_psr_cat_op {
    uint32_t cmd;       /* IN: XEN_SYSCTL_PSR_CAT_* */
    uint32_t target;    /* IN */
    union {
        struct {
            uint32_t cbm_len;   /* OUT: CBM length */
            uint32_t cos_max;   /* OUT: Maximum COS */
        } l3_info;
    } u;
};
typedef struct xen_sysctl_psr_cat_op xen_sysctl_psr_cat_op_t;
DEFINE_XEN_GUEST_HANDLE(xen_sysctl_psr_cat_op_t);

#define XEN_SYSCTL_TMEM_OP_ALL_CLIENTS 0xFFFFU

#define XEN_SYSCTL_TMEM_OP_THAW                   0
#define XEN_SYSCTL_TMEM_OP_FREEZE                 1
#define XEN_SYSCTL_TMEM_OP_FLUSH                  2
#define XEN_SYSCTL_TMEM_OP_DESTROY                3
#define XEN_SYSCTL_TMEM_OP_LIST                   4
#define XEN_SYSCTL_TMEM_OP_SET_WEIGHT             5
#define XEN_SYSCTL_TMEM_OP_SET_CAP                6
#define XEN_SYSCTL_TMEM_OP_SET_COMPRESS           7
#define XEN_SYSCTL_TMEM_OP_QUERY_FREEABLE_MB      8
#define XEN_SYSCTL_TMEM_OP_SAVE_BEGIN             10
#define XEN_SYSCTL_TMEM_OP_SAVE_GET_VERSION       11
#define XEN_SYSCTL_TMEM_OP_SAVE_GET_MAXPOOLS      12
#define XEN_SYSCTL_TMEM_OP_SAVE_GET_CLIENT_WEIGHT 13
#define XEN_SYSCTL_TMEM_OP_SAVE_GET_CLIENT_CAP    14
#define XEN_SYSCTL_TMEM_OP_SAVE_GET_CLIENT_FLAGS  15
#define XEN_SYSCTL_TMEM_OP_SAVE_GET_POOL_FLAGS    16
#define XEN_SYSCTL_TMEM_OP_SAVE_GET_POOL_NPAGES   17
#define XEN_SYSCTL_TMEM_OP_SAVE_GET_POOL_UUID     18
#define XEN_SYSCTL_TMEM_OP_SAVE_GET_NEXT_PAGE     19
#define XEN_SYSCTL_TMEM_OP_SAVE_GET_NEXT_INV      20
#define XEN_SYSCTL_TMEM_OP_SAVE_END               21
#define XEN_SYSCTL_TMEM_OP_RESTORE_BEGIN          30
#define XEN_SYSCTL_TMEM_OP_RESTORE_PUT_PAGE       32
#define XEN_SYSCTL_TMEM_OP_RESTORE_FLUSH_PAGE     33

/*
 * XEN_SYSCTL_TMEM_OP_SAVE_GET_NEXT_[PAGE|INV] override the 'buf' in
 * xen_sysctl_tmem_op with this structure - sometimes with an extra
 * page tacked on.
 */
struct tmem_handle {
    uint32_t pool_id;
    uint32_t index;
    xen_tmem_oid_t oid;
};

struct xen_sysctl_tmem_op {
    uint32_t cmd;       /* IN: XEN_SYSCTL_TMEM_OP_* . */
    int32_t pool_id;    /* IN: 0 by default unless _SAVE_* or _RESTORE_*. */
    uint32_t cli_id;    /* IN: client id; 0 for XEN_SYSCTL_TMEM_OP_QUERY_FREEABLE_MB,
                           for all others it can be a domain id or
                           XEN_SYSCTL_TMEM_OP_ALL_CLIENTS for all. */
    uint32_t arg1;      /* IN: If not applicable to command use 0. */
    uint32_t arg2;      /* IN: If not applicable to command use 0. */
    uint32_t pad;       /* Padding so structure is the same under 32 and 64. */
    xen_tmem_oid_t oid; /* IN: If not applicable to command use 0s. */
    XEN_GUEST_HANDLE_64(char) buf; /* IN/OUT: Buffer to save and restore ops. */
};
typedef struct xen_sysctl_tmem_op xen_sysctl_tmem_op_t;
DEFINE_XEN_GUEST_HANDLE(xen_sysctl_tmem_op_t);

struct xen_sysctl {
    uint32_t cmd;
#define XEN_SYSCTL_readconsole                    1
#define XEN_SYSCTL_tbuf_op                        2
#define XEN_SYSCTL_physinfo                       3
#define XEN_SYSCTL_sched_id                       4
#define XEN_SYSCTL_perfc_op                       5
#define XEN_SYSCTL_getdomaininfolist              6
#define XEN_SYSCTL_debug_keys                     7
#define XEN_SYSCTL_getcpuinfo                     8
#define XEN_SYSCTL_availheap                      9
#define XEN_SYSCTL_get_pmstat                    10
#define XEN_SYSCTL_cpu_hotplug                   11
#define XEN_SYSCTL_pm_op                         12
#define XEN_SYSCTL_page_offline_op               14
#define XEN_SYSCTL_lockprof_op                   15
#define XEN_SYSCTL_cputopoinfo                   16
#define XEN_SYSCTL_numainfo                      17
#define XEN_SYSCTL_cpupool_op                    18
#define XEN_SYSCTL_scheduler_op                  19
#define XEN_SYSCTL_coverage_op                   20
#define XEN_SYSCTL_psr_cmt_op                    21
#define XEN_SYSCTL_pcitopoinfo                   22
#define XEN_SYSCTL_psr_cat_op                    23
#define XEN_SYSCTL_tmem_op                       24
    uint32_t interface_version; /* XEN_SYSCTL_INTERFACE_VERSION */
    union {
        struct xen_sysctl_readconsole       readconsole;
        struct xen_sysctl_tbuf_op           tbuf_op;
        struct xen_sysctl_physinfo          physinfo;
        struct xen_sysctl_cputopoinfo       cputopoinfo;
        struct xen_sysctl_pcitopoinfo       pcitopoinfo;
        struct xen_sysctl_numainfo          numainfo;
        struct xen_sysctl_sched_id          sched_id;
        struct xen_sysctl_perfc_op          perfc_op;
        struct xen_sysctl_getdomaininfolist getdomaininfolist;
        struct xen_sysctl_debug_keys        debug_keys;
        struct xen_sysctl_getcpuinfo        getcpuinfo;
        struct xen_sysctl_availheap         availheap;
        struct xen_sysctl_get_pmstat        get_pmstat;
        struct xen_sysctl_cpu_hotplug       cpu_hotplug;
        struct xen_sysctl_pm_op             pm_op;
        struct xen_sysctl_page_offline_op   page_offline;
        struct xen_sysctl_lockprof_op       lockprof_op;
        struct xen_sysctl_cpupool_op        cpupool_op;
        struct xen_sysctl_scheduler_op      scheduler_op;
        struct xen_sysctl_coverage_op       coverage_op;
        struct xen_sysctl_psr_cmt_op        psr_cmt_op;
        struct xen_sysctl_psr_cat_op        psr_cat_op;
        struct xen_sysctl_tmem_op           tmem_op;
        uint8_t                             pad[128];
    } u;
};
typedef struct xen_sysctl xen_sysctl_t;
DEFINE_XEN_GUEST_HANDLE(xen_sysctl_t);

#endif /* __XEN_PUBLIC_SYSCTL_H__ */

/*
 * Local variables:
 * mode: C
 * c-file-style: "BSD"
 * c-basic-offset: 4
 * tab-width: 4
 * indent-tabs-mode: nil
 * End:
 */