1/******************************************************************************
2 * memory.h
3 *
4 * Memory reservation and information.
5 *
6 * Permission is hereby granted, free of charge, to any person obtaining a copy
7 * of this software and associated documentation files (the "Software"), to
8 * deal in the Software without restriction, including without limitation the
9 * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
10 * sell copies of the Software, and to permit persons to whom the Software is
11 * furnished to do so, subject to the following conditions:
12 *
13 * The above copyright notice and this permission notice shall be included in
14 * all copies or substantial portions of the Software.
15 *
16 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
17 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
18 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
19 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
20 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
21 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
22 * DEALINGS IN THE SOFTWARE.
23 *
24 * Copyright (c) 2005, Keir Fraser <keir@xensource.com>
25 */
26
27#ifndef __XEN_PUBLIC_MEMORY_H__
28#define __XEN_PUBLIC_MEMORY_H__
29
30#include "xen.h"
31
32/*
33 * Increase or decrease the specified domain's memory reservation. Returns the
34 * number of extents successfully allocated or freed.
35 * arg == addr of struct xen_memory_reservation.
36 */
#define XENMEM_increase_reservation 0
#define XENMEM_decrease_reservation 1
#define XENMEM_populate_physmap     6

#if __XEN_INTERFACE_VERSION__ >= 0x00030209
/*
 * Constructors/accessors for the 'mem_flags' field of
 * struct xen_memory_reservation:
 *   bits  0-7 : maximum address width of the allocation (XENMEMF_address_bits)
 *   bits  8-15: NUMA node, stored biased by +1 so that 0 means "no
 *               node preference" (XENMEMF_node / XENMEMF_get_node)
 *   bit  16   : populate-on-demand
 *   bit  17   : restrict allocation to exactly the requested node
 */
/*
 * Maximum # bits addressable by the user of the allocated region (e.g., I/O
 * devices often have a 32-bit limitation even in 64-bit systems). If zero
 * then the user has no addressing restriction. This field is not used by
 * XENMEM_decrease_reservation.
 */
#define XENMEMF_address_bits(x)     (x)
#define XENMEMF_get_address_bits(x) ((x) & 0xffu)
/* NUMA node to allocate from. */
#define XENMEMF_node(x)     (((x) + 1) << 8)
#define XENMEMF_get_node(x) ((((x) >> 8) - 1) & 0xffu)
/* Flag to populate physmap with populate-on-demand entries */
#define XENMEMF_populate_on_demand (1<<16)
/* Flag to request allocation only from the node specified */
#define XENMEMF_exact_node_request  (1<<17)
/* Convenience: select node 'n' and insist the allocation comes from it. */
#define XENMEMF_exact_node(n) (XENMEMF_node(n) | XENMEMF_exact_node_request)
#endif
59
/*
 * Argument for XENMEM_{increase,decrease}_reservation and
 * XENMEM_populate_physmap. NB: public ABI — layout must not change.
 */
struct xen_memory_reservation {

    /*
     * XENMEM_increase_reservation:
     *   OUT: MFN (*not* GMFN) bases of extents that were allocated
     * XENMEM_decrease_reservation:
     *   IN:  GMFN bases of extents to free
     * XENMEM_populate_physmap:
     *   IN:  GPFN bases of extents to populate with memory
     *   OUT: GMFN bases of extents that were allocated
     *   (NB. This command also updates the mach_to_phys translation table)
     */
    XEN_GUEST_HANDLE(xen_pfn_t) extent_start;

    /* Number of extents, and size/alignment of each (2^extent_order pages). */
    xen_ulong_t    nr_extents;
    unsigned int   extent_order;

#if __XEN_INTERFACE_VERSION__ >= 0x00030209
    /* XENMEMF flags (built with the XENMEMF_* constructors above). */
    unsigned int   mem_flags;
#else
    /* Pre-3.2.9 interface: raw address-width restriction only. */
    unsigned int   address_bits;
#endif

    /*
     * Domain whose reservation is being changed.
     * Unprivileged domains can specify only DOMID_SELF.
     */
    domid_t        domid;
};
typedef struct xen_memory_reservation xen_memory_reservation_t;
DEFINE_XEN_GUEST_HANDLE(xen_memory_reservation_t);
93
94/*
95 * An atomic exchange of memory pages. If return code is zero then
96 * @out.extent_list provides GMFNs of the newly-allocated memory.
97 * Returns zero on complete success, otherwise a negative error code.
98 * On complete success then always @nr_exchanged == @in.nr_extents.
99 * On partial success @nr_exchanged indicates how much work was done.
100 */
101#define XENMEM_exchange             11
102struct xen_memory_exchange {
103    /*
104     * [IN] Details of memory extents to be exchanged (GMFN bases).
105     * Note that @in.address_bits is ignored and unused.
106     */
107    struct xen_memory_reservation in;
108
109    /*
110     * [IN/OUT] Details of new memory extents.
111     * We require that:
112     *  1. @in.domid == @out.domid
113     *  2. @in.nr_extents  << @in.extent_order ==
114     *     @out.nr_extents << @out.extent_order
115     *  3. @in.extent_start and @out.extent_start lists must not overlap
116     *  4. @out.extent_start lists GPFN bases to be populated
117     *  5. @out.extent_start is overwritten with allocated GMFN bases
118     */
119    struct xen_memory_reservation out;
120
121    /*
122     * [OUT] Number of input extents that were successfully exchanged:
123     *  1. The first @nr_exchanged input extents were successfully
124     *     deallocated.
125     *  2. The corresponding first entries in the output extent list correctly
126     *     indicate the GMFNs that were successfully exchanged.
127     *  3. All other input and output extents are untouched.
128     *  4. If not all input exents are exchanged then the return code of this
129     *     command will be non-zero.
130     *  5. THIS FIELD MUST BE INITIALISED TO ZERO BY THE CALLER!
131     */
132    xen_ulong_t nr_exchanged;
133};
134typedef struct xen_memory_exchange xen_memory_exchange_t;
135DEFINE_XEN_GUEST_HANDLE(xen_memory_exchange_t);
136
137/*
138 * Returns the maximum machine frame number of mapped RAM in this system.
139 * This command always succeeds (it never returns an error code).
140 * arg == NULL.
141 */
142#define XENMEM_maximum_ram_page     2
143
144/*
145 * Returns the current or maximum memory reservation, in pages, of the
146 * specified domain (may be DOMID_SELF). Returns -ve errcode on failure.
147 * arg == addr of domid_t.
148 */
149#define XENMEM_current_reservation  3
150#define XENMEM_maximum_reservation  4
151
152/*
153 * Returns the maximum GPFN in use by the guest, or -ve errcode on failure.
154 */
155#define XENMEM_maximum_gpfn         14
156
157/*
158 * Returns a list of MFN bases of 2MB extents comprising the machine_to_phys
159 * mapping table. Architectures which do not have a m2p table do not implement
160 * this command.
161 * arg == addr of xen_machphys_mfn_list_t.
162 */
163#define XENMEM_machphys_mfn_list    5
/* Argument for XENMEM_machphys_mfn_list. NB: public ABI — layout is fixed. */
struct xen_machphys_mfn_list {
    /*
     * [IN] Size of the 'extent_start' array. Fewer entries will be filled if
     * the machphys table is smaller than max_extents * 2MB.
     */
    unsigned int max_extents;

    /*
     * [IN/OUT] Pointer to buffer to fill with list of extent starts. If there
     * are any large discontiguities in the machine address space, 2MB gaps in
     * the machphys table will be represented by an MFN base of zero.
     */
    XEN_GUEST_HANDLE(xen_pfn_t) extent_start;

    /*
     * [OUT] Number of extents written to the above array. This will be
     * smaller than 'max_extents' if the machphys table is smaller than
     * max_extents * 2MB.
     */
    unsigned int nr_extents;
};
typedef struct xen_machphys_mfn_list xen_machphys_mfn_list_t;
DEFINE_XEN_GUEST_HANDLE(xen_machphys_mfn_list_t);
186
187/*
188 * Returns the location in virtual address space of the machine_to_phys
189 * mapping table. Architectures which do not have a m2p table, or which do not
190 * map it by default into guest address space, do not implement this command.
191 * arg == addr of xen_machphys_mapping_t.
192 */
193#define XENMEM_machphys_mapping     12
194struct xen_machphys_mapping {
195    xen_ulong_t v_start, v_end; /* Start and end virtual addresses.   */
196    xen_ulong_t max_mfn;        /* Maximum MFN that can be looked up. */
197};
198typedef struct xen_machphys_mapping xen_machphys_mapping_t;
199DEFINE_XEN_GUEST_HANDLE(xen_machphys_mapping_t);
200
201/*
202 * Sets the GPFN at which a particular page appears in the specified guest's
203 * pseudophysical address space.
204 * arg == addr of xen_add_to_physmap_t.
205 */
206#define XENMEM_add_to_physmap      7
207struct xen_add_to_physmap {
208    /* Which domain to change the mapping for. */
209    domid_t domid;
210
211    /* Number of pages to go through for gmfn_range */
212    uint16_t    size;
213
214    /* Source mapping space. */
215#define XENMAPSPACE_shared_info 0 /* shared info page */
216#define XENMAPSPACE_grant_table 1 /* grant table page */
217#define XENMAPSPACE_gmfn        2 /* GMFN */
218#define XENMAPSPACE_gmfn_range  3 /* GMFN range */
219    unsigned int space;
220
221#define XENMAPIDX_grant_table_status 0x80000000
222
223    /* Index into source mapping space. */
224    xen_ulong_t idx;
225
226    /* GPFN where the source mapping page should appear. */
227    xen_pfn_t     gpfn;
228};
229typedef struct xen_add_to_physmap xen_add_to_physmap_t;
230DEFINE_XEN_GUEST_HANDLE(xen_add_to_physmap_t);
231
232/*
233 * Unmaps the page appearing at a particular GPFN from the specified guest's
234 * pseudophysical address space.
235 * arg == addr of xen_remove_from_physmap_t.
236 */
237#define XENMEM_remove_from_physmap      15
238struct xen_remove_from_physmap {
239    /* Which domain to change the mapping for. */
240    domid_t domid;
241
242    /* GPFN of the current mapping of the page. */
243    xen_pfn_t     gpfn;
244};
245typedef struct xen_remove_from_physmap xen_remove_from_physmap_t;
246DEFINE_XEN_GUEST_HANDLE(xen_remove_from_physmap_t);
247
/*** REMOVED ***/
/* Command value 8 is retired and must not be reused. */
/*#define XENMEM_translate_gpfn_list  8*/

/*
 * Returns the pseudo-physical memory map as it was when the domain
 * was started (specified by XENMEM_set_memory_map).
 * arg == addr of xen_memory_map_t.
 */
#define XENMEM_memory_map           9
struct xen_memory_map {
    /*
     * [IN/OUT] On call the number of entries which can be stored in buffer.
     * On return the number of entries which have been stored in
     * buffer.
     */
    unsigned int nr_entries;

    /*
     * [OUT] Entries in the buffer are in the same format as returned by the
     * BIOS INT 0x15 EAX=0xE820 call.
     */
    XEN_GUEST_HANDLE(void) buffer;
};
typedef struct xen_memory_map xen_memory_map_t;
DEFINE_XEN_GUEST_HANDLE(xen_memory_map_t);
273
274/*
275 * Returns the real physical memory map. Passes the same structure as
276 * XENMEM_memory_map.
277 * arg == addr of xen_memory_map_t.
278 */
279#define XENMEM_machine_memory_map   10
280
281/*
282 * Set the pseudo-physical memory map of a domain, as returned by
283 * XENMEM_memory_map.
284 * arg == addr of xen_foreign_memory_map_t.
285 */
286#define XENMEM_set_memory_map       13
287struct xen_foreign_memory_map {
288    domid_t domid;
289    struct xen_memory_map map;
290};
291typedef struct xen_foreign_memory_map xen_foreign_memory_map_t;
292DEFINE_XEN_GUEST_HANDLE(xen_foreign_memory_map_t);
293
/*
 * Set or query a domain's populate-on-demand (PoD) target.
 * arg == addr of xen_pod_target_t.
 */
#define XENMEM_set_pod_target       16
#define XENMEM_get_pod_target       17
struct xen_pod_target {
    /* IN: desired PoD target, in pages. */
    uint64_t target_pages;
    /* OUT: current totals for the domain. */
    uint64_t tot_pages;
    uint64_t pod_cache_pages;
    uint64_t pod_entries;
    /* IN: domain being targeted. */
    domid_t domid;
};
typedef struct xen_pod_target xen_pod_target_t;
/* NOTE(review): unlike the other structs here there is no
 * DEFINE_XEN_GUEST_HANDLE(xen_pod_target_t) — upstream Xen provides one;
 * confirm whether its omission here is intentional. */
307
308#if defined(__XEN__) || defined(__XEN_TOOLS__)
309
/* Fallback for toolchains/headers that do not define the aligned type.
 * NOTE(review): this loses any 8-byte-alignment attribute the real
 * definition carries — confirm layout matches the hypervisor's view. */
#ifndef uint64_aligned_t
#define uint64_aligned_t uint64_t
#endif

/*
 * Get the number of MFNs saved through memory sharing.
 * The call never fails.
 */
#define XENMEM_get_sharing_freed_pages    18
#define XENMEM_get_sharing_shared_pages   19

/* Memory-paging operations (sub-ops of XENMEM_paging_op). */
#define XENMEM_paging_op                    20
#define XENMEM_paging_op_nominate           0
#define XENMEM_paging_op_evict              1
#define XENMEM_paging_op_prep               2

/* Memory-access operations (sub-ops of XENMEM_access_op). */
#define XENMEM_access_op                    21
#define XENMEM_access_op_resume             0

/* Argument for XENMEM_paging_op / XENMEM_access_op. */
struct xen_mem_event_op {
    uint8_t     op;         /* XENMEM_*_op_* */
    domid_t     domain;


    /* PAGING_PREP IN: buffer to immediately fill page in */
    uint64_aligned_t    buffer;
    /* Other OPs */
    uint64_aligned_t    gfn;           /* IN:  gfn of page being operated on */
};
typedef struct xen_mem_event_op xen_mem_event_op_t;
DEFINE_XEN_GUEST_HANDLE(xen_mem_event_op_t);
341
/* Memory-sharing operations (sub-ops of XENMEM_sharing_op). */
#define XENMEM_sharing_op                   22
#define XENMEM_sharing_op_nominate_gfn      0
#define XENMEM_sharing_op_nominate_gref     1
#define XENMEM_sharing_op_share             2
#define XENMEM_sharing_op_resume            3
#define XENMEM_sharing_op_debug_gfn         4
#define XENMEM_sharing_op_debug_mfn         5
#define XENMEM_sharing_op_debug_gref        6
#define XENMEM_sharing_op_add_physmap       7
#define XENMEM_sharing_op_audit             8

/* Error codes: the source/client sharing handle was stale or invalid. */
#define XENMEM_SHARING_OP_S_HANDLE_INVALID  (-10)
#define XENMEM_SHARING_OP_C_HANDLE_INVALID  (-9)

/* The following allows sharing of grant refs. This is useful
 * for sharing utilities sitting as "filters" in IO backends
 * (e.g. memshr + blktap(2)). The IO backend is only exposed
 * to grant references, and this allows sharing of the grefs.
 * Bit 62 of the 64-bit field tags the value as a gref. */
#define XENMEM_SHARING_OP_FIELD_IS_GREF_FLAG   (1ULL << 62)

/* Store 'val' into 'field' tagged as a grant reference.
 * 'val' is parenthesized so argument expressions with lower-precedence
 * operators (e.g. a ternary) expand correctly. */
#define XENMEM_SHARING_OP_FIELD_MAKE_GREF(field, val)  \
    (field) = (XENMEM_SHARING_OP_FIELD_IS_GREF_FLAG | (val))
/* Non-zero iff 'field' carries the gref tag. */
#define XENMEM_SHARING_OP_FIELD_IS_GREF(field)         \
    ((field) & XENMEM_SHARING_OP_FIELD_IS_GREF_FLAG)
/* Extract the gref value, stripping the tag bit. */
#define XENMEM_SHARING_OP_FIELD_GET_GREF(field)        \
    ((field) & (~XENMEM_SHARING_OP_FIELD_IS_GREF_FLAG))
368
/* Argument for XENMEM_sharing_op. The union arm used is selected by 'op'.
 * NB: public ABI — field order and the uint64_aligned_t sizing are fixed. */
struct xen_mem_sharing_op {
    uint8_t     op;     /* XENMEM_sharing_op_* */
    domid_t     domain;

    union {
        struct mem_sharing_op_nominate {  /* OP_NOMINATE_xxx           */
            union {
                uint64_aligned_t gfn;     /* IN: gfn to nominate       */
                uint32_t      grant_ref;  /* IN: grant ref to nominate */
            } u;
            uint64_aligned_t  handle;     /* OUT: the handle           */
        } nominate;
        struct mem_sharing_op_share {     /* OP_SHARE/ADD_PHYSMAP */
            uint64_aligned_t source_gfn;    /* IN: the gfn of the source page */
            uint64_aligned_t source_handle; /* IN: handle to the source page */
            uint64_aligned_t client_gfn;    /* IN: the client gfn */
            uint64_aligned_t client_handle; /* IN: handle to the client page */
            domid_t  client_domain; /* IN: the client domain id */
        } share;
        struct mem_sharing_op_debug {     /* OP_DEBUG_xxx */
            union {
                uint64_aligned_t gfn;      /* IN: gfn to debug          */
                uint64_aligned_t mfn;      /* IN: mfn to debug          */
                uint32_t gref;     /* IN: gref to debug         */
            } u;
        } debug;
    } u;
};
typedef struct xen_mem_sharing_op xen_mem_sharing_op_t;
DEFINE_XEN_GUEST_HANDLE(xen_mem_sharing_op_t);
399
400#endif /* defined(__XEN__) || defined(__XEN_TOOLS__) */
401
402#endif /* __XEN_PUBLIC_MEMORY_H__ */
403
404/*
405 * Local variables:
406 * mode: C
407 * c-set-style: "BSD"
408 * c-basic-offset: 4
409 * tab-width: 4
410 * indent-tabs-mode: nil
411 * End:
412 */
413