1/*
2 * Copyright 2017, Data61, CSIRO (ABN 41 687 119 230)
3 *
4 * SPDX-License-Identifier: GPL-2.0-only
5 */
6
7#pragma once
8
#include <assert.h>
#include <stdbool.h>
#include <stdint.h>
#include <stdlib.h>
#include <string.h>

#include <sel4/sel4.h>

#include <sel4vm/arch/vmcs_fields.h>

#include "vmcs.h"
16
/* Virtualization state for the guest's control registers (CR0/CR3/CR4).
 * Used to implement CR masking/shadowing: some CR bits are owned by the
 * host (VMM) while the guest sees shadowed values. */
typedef struct guest_cr_virt_state {
    /* mask represents bits that are owned by us, the host */
    unsigned int cr0_mask;
    unsigned int cr4_mask;
    /* the shadow represents what the values of our owned bits should be seen as by the guest.
     * i.e. the value they set it to */
    unsigned int cr0_shadow;
    unsigned int cr4_shadow;
    /* for any bits owned by us, this represents what those bits should actually be set to */
    unsigned int cr0_host_bits;
    unsigned int cr4_host_bits;
    /* the raw cr3 is only valid if we're trapping guest accesses to cr3, which we
     * only do if the guest has not yet enabled paging for itself. If the guest has
     * enabled paging then the value should be retrieved from the guest machine state */
    uint32_t cr3_guest;
} guest_cr_virt_state_t;
33
/* Snapshot of the information generated by a VM exit. Only meaningful
 * while in_exit is true (see the vm_guest_exit_get_* accessors, which
 * assert on it). */
typedef struct  guest_exit_information {
    /* true while we are handling a VM exit */
    bool in_exit;
    /* VMX exit reason */
    unsigned int reason;
    /* VMX exit qualification */
    unsigned int qualification;
    /* length of the instruction that caused the exit */
    unsigned int instruction_length;
    /* guest-physical address associated with the exit (e.g. EPT faults) */
    unsigned int guest_physical;
} guest_exit_information_t;
41
/* Tracks the freshness of a cached piece of guest machine state relative
 * to the authoritative copy held by the kernel/VMCS. */
typedef enum machine_state_status {
    /* Unknown means guest has run and we have no idea if
     * our value is up to date */
    machine_state_unknown = 0,
    /* Valid means the value we have is up to date and we
     * have not modified it with respect to what the guest
     * believes */
    machine_state_valid,
    /* As part of the virtualization we have modified the value
     * and this needs to be synced back to the guest */
    machine_state_modified
} machine_state_status_t;
54
/* Declare a cached machine-state field together with its status tracker. */
#define MACHINE_STATE(type, name) machine_state_status_t name##_status; type name
/* Record a freshly-read value; only legal when the field was unknown. */
#define MACHINE_STATE_READ(name, value) do { assert(name##_status == machine_state_unknown); name##_status = machine_state_valid; name = (value);} while(0)
/* Mark the field as locally modified (needs syncing back to the guest). */
#define MACHINE_STATE_DIRTY(name) do { name##_status = machine_state_modified; } while(0)
/* Mark a modified field as written back; only legal when it was modified. */
#define MACHINE_STATE_SYNC(name) do { assert(name##_status == machine_state_modified); name##_status = machine_state_valid; } while(0)
/* Invalidate the cached value; modified fields must be synced first. */
#define MACHINE_STATE_INVAL(name) do { assert(name##_status != machine_state_modified); name##_status = machine_state_unknown; } while(0)
/* Sync then invalidate in one step. */
#define MACHINE_STATE_SYNC_INVAL(name) do { MACHINE_STATE_SYNC(name); MACHINE_STATE_INVAL(name); } while(0)
/* Initialise a field's status to valid (used after zero-initialisation). */
#define MACHINE_STATE_INIT(name) do { name##_status = machine_state_valid; } while(0)
#define IS_MACHINE_STATE_VALID(name) (name##_status == machine_state_valid)
#define IS_MACHINE_STATE_UNKNOWN(name) (name##_status == machine_state_unknown)
#define IS_MACHINE_STATE_MODIFIED(name) (name##_status == machine_state_modified)
65
/* Cached copy of the guest's machine state. Each MACHINE_STATE field
 * carries a status tracker (unknown/valid/modified); see the accessors
 * below for lazy reads from and write-backs to the VMCS. */
typedef struct guest_machine_state {
    MACHINE_STATE(seL4_VCPUContext, context);
    MACHINE_STATE(unsigned int, cr0);
    MACHINE_STATE(unsigned int, cr3);
    MACHINE_STATE(unsigned int, cr4);
    MACHINE_STATE(unsigned int, rflags);
    MACHINE_STATE(unsigned int, guest_interruptibility);
    MACHINE_STATE(unsigned int, idt_base);
    MACHINE_STATE(unsigned int, idt_limit);
    MACHINE_STATE(unsigned int, gdt_base);
    MACHINE_STATE(unsigned int, gdt_limit);
    MACHINE_STATE(unsigned int, cs_selector);
    MACHINE_STATE(unsigned int, entry_exception_error_code);
    /* This is state that we set on VMentry and get back on
     * a vmexit, therefore it is always valid and correct */
    unsigned int eip;
    unsigned int control_entry;
    unsigned int control_ppc;
} guest_machine_state_t;
85
/* Define the seL4_UserContext layout so we can treat it as an array:
 * index of each general-purpose register within the context. */
#define USER_CONTEXT_EAX 0
#define USER_CONTEXT_EBX 1
#define USER_CONTEXT_ECX 2
#define USER_CONTEXT_EDX 3
#define USER_CONTEXT_ESI 4
#define USER_CONTEXT_EDI 5
#define USER_CONTEXT_EBP 6
94
/* Internal state the VMM keeps to virtualize the guest; the guest does
 * not observe this directly. */
typedef struct guest_virt_state {
    /* control-register virtualization state (masks/shadows) */
    guest_cr_virt_state_t cr;
    /* are we hlt'ed waiting for an interrupt */
    int interrupt_halt;
} guest_virt_state_t;
100
/* Top-level per-guest state bundle. */
typedef struct guest_state {
    /* Guest machine state. Bits of this may or may not be valid.
     * This is all state that the guest itself observes and modifies.
     * Either explicitly (registers) or implicitly (interruptability state) */
    guest_machine_state_t machine;
    /* Internal state that we use for handling guest state virtualization. */
    guest_virt_state_t virt;
    /* Information relating specifically to a guest exit, that is generated
     * as a result of the exit */
    guest_exit_information_t exit;
} guest_state_t;
112
113static inline bool vm_guest_state_no_modified(guest_state_t *gs)
114{
115    return !(
116               IS_MACHINE_STATE_MODIFIED(gs->machine.context) ||
117               IS_MACHINE_STATE_MODIFIED(gs->machine.cr0) ||
118               IS_MACHINE_STATE_MODIFIED(gs->machine.cr3) ||
119               IS_MACHINE_STATE_MODIFIED(gs->machine.cr4) ||
120               IS_MACHINE_STATE_MODIFIED(gs->machine.rflags) ||
121               IS_MACHINE_STATE_MODIFIED(gs->machine.guest_interruptibility) ||
122               IS_MACHINE_STATE_MODIFIED(gs->machine.idt_base) ||
123               IS_MACHINE_STATE_MODIFIED(gs->machine.idt_limit) ||
124               IS_MACHINE_STATE_MODIFIED(gs->machine.gdt_base) ||
125               IS_MACHINE_STATE_MODIFIED(gs->machine.gdt_limit) ||
126               IS_MACHINE_STATE_MODIFIED(gs->machine.cs_selector) ||
127               IS_MACHINE_STATE_MODIFIED(gs->machine.entry_exception_error_code)
128           );
129}
130
131static inline void vm_guest_state_initialise(guest_state_t *gs)
132{
133    memset(gs, 0, sizeof(guest_state_t));
134    MACHINE_STATE_INIT(gs->machine.context);
135    MACHINE_STATE_INIT(gs->machine.cr0);
136    MACHINE_STATE_INIT(gs->machine.cr3);
137    MACHINE_STATE_INIT(gs->machine.cr4);
138    MACHINE_STATE_INIT(gs->machine.rflags);
139    MACHINE_STATE_INIT(gs->machine.guest_interruptibility);
140    MACHINE_STATE_INIT(gs->machine.idt_base);
141    MACHINE_STATE_INIT(gs->machine.idt_limit);
142    MACHINE_STATE_INIT(gs->machine.gdt_base);
143    MACHINE_STATE_INIT(gs->machine.gdt_limit);
144    MACHINE_STATE_INIT(gs->machine.cs_selector);
145    MACHINE_STATE_INIT(gs->machine.entry_exception_error_code);
146}
147
/* Mark every tracked machine-state field as unknown, forcing a re-read
 * from the VMCS on next access. Asserts (via MACHINE_STATE_INVAL) that
 * no field is still in the modified state, i.e. everything was synced. */
static inline void vm_guest_state_invalidate_all(guest_state_t *gs)
{
    MACHINE_STATE_INVAL(gs->machine.context);
    MACHINE_STATE_INVAL(gs->machine.cr0);
    MACHINE_STATE_INVAL(gs->machine.cr3);
    MACHINE_STATE_INVAL(gs->machine.cr4);
    MACHINE_STATE_INVAL(gs->machine.rflags);
    MACHINE_STATE_INVAL(gs->machine.guest_interruptibility);
    MACHINE_STATE_INVAL(gs->machine.idt_base);
    MACHINE_STATE_INVAL(gs->machine.idt_limit);
    MACHINE_STATE_INVAL(gs->machine.gdt_base);
    MACHINE_STATE_INVAL(gs->machine.gdt_limit);
    MACHINE_STATE_INVAL(gs->machine.cs_selector);
    MACHINE_STATE_INVAL(gs->machine.entry_exception_error_code);
}
163
164/* get */
/* Return the cached guest EIP; always valid (set on VM entry, read back
 * on VM exit), so no VMCS access is needed. */
static inline unsigned int vm_guest_state_get_eip(guest_state_t *gs)
{
    return gs->machine.eip;
}
169
170static inline unsigned int vm_guest_state_get_cr0(guest_state_t *gs, seL4_CPtr vcpu)
171{
172    int err;
173    unsigned int value;
174    if (IS_MACHINE_STATE_UNKNOWN(gs->machine.cr0)) {
175        err = vm_vmcs_read(vcpu, VMX_GUEST_CR0, &value);
176        assert(!err);
177        MACHINE_STATE_READ(gs->machine.cr0, value);
178    }
179    return gs->machine.cr0;
180}
181
182static inline unsigned int vm_guest_state_get_cr3(guest_state_t *gs, seL4_CPtr vcpu)
183{
184    int err;
185    unsigned int value;
186    if (IS_MACHINE_STATE_UNKNOWN(gs->machine.cr3)) {
187        err = vm_vmcs_read(vcpu, VMX_GUEST_CR3, &value);
188        assert(!err);
189        MACHINE_STATE_READ(gs->machine.cr3, value);
190    }
191    return gs->machine.cr3;
192}
193
194static inline unsigned int vm_guest_state_get_cr4(guest_state_t *gs, seL4_CPtr vcpu)
195{
196    int err;
197    unsigned int value;
198    if (IS_MACHINE_STATE_UNKNOWN(gs->machine.cr4)) {
199        err = vm_vmcs_read(vcpu, VMX_GUEST_CR4, &value);
200        assert(!err);
201        MACHINE_STATE_READ(gs->machine.cr4, value);
202    }
203    return gs->machine.cr4;
204}
205
206static inline unsigned int vm_guest_state_get_rflags(guest_state_t *gs, seL4_CPtr vcpu)
207{
208    int err;
209    unsigned int value;
210    if (IS_MACHINE_STATE_UNKNOWN(gs->machine.rflags)) {
211        err = vm_vmcs_read(vcpu, VMX_GUEST_RFLAGS, &value);
212        assert(!err);
213        MACHINE_STATE_READ(gs->machine.rflags, value);
214    }
215    return gs->machine.rflags;
216}
217
218static inline unsigned int vm_guest_state_get_interruptibility(guest_state_t *gs, seL4_CPtr vcpu)
219{
220    int err;
221    unsigned int value;
222    if (IS_MACHINE_STATE_UNKNOWN(gs->machine.guest_interruptibility)) {
223        err = vm_vmcs_read(vcpu, VMX_GUEST_INTERRUPTABILITY, &value);
224        assert(!err);
225        MACHINE_STATE_READ(gs->machine.guest_interruptibility, value);
226    }
227    return gs->machine.guest_interruptibility;
228}
229
/* Return the cached VM-entry controls; always valid (set on VM entry),
 * so no VMCS access is needed. */
static inline unsigned int vm_guest_state_get_control_entry(guest_state_t *gs)
{
    return gs->machine.control_entry;
}
234
/* Return the cached primary processor-based controls; always valid
 * (set on VM entry), so no VMCS access is needed. */
static inline unsigned int vm_guest_state_get_control_ppc(guest_state_t *gs)
{
    return gs->machine.control_ppc;
}
239
240static inline unsigned int vm_guest_state_get_idt_base(guest_state_t *gs, seL4_CPtr vcpu)
241{
242    int err;
243    unsigned int value;
244    if (IS_MACHINE_STATE_UNKNOWN(gs->machine.idt_base)) {
245        err = vm_vmcs_read(vcpu, VMX_GUEST_IDTR_BASE, &value);
246        assert(!err);
247        MACHINE_STATE_READ(gs->machine.idt_base, value);
248    }
249    return gs->machine.idt_base;
250}
251
252static inline unsigned int vm_guest_state_get_idt_limit(guest_state_t *gs, seL4_CPtr vcpu)
253{
254    int err;
255    unsigned int value;
256    if (IS_MACHINE_STATE_UNKNOWN(gs->machine.idt_limit)) {
257        err = vm_vmcs_read(vcpu, VMX_GUEST_IDTR_LIMIT, &value);
258        assert(!err);
259        MACHINE_STATE_READ(gs->machine.idt_limit, value);
260    }
261    return gs->machine.idt_limit;
262}
263
264static inline unsigned int vm_guest_state_get_gdt_base(guest_state_t *gs, seL4_CPtr vcpu)
265{
266    int err;
267    unsigned int value;
268    if (IS_MACHINE_STATE_UNKNOWN(gs->machine.gdt_base)) {
269        err = vm_vmcs_read(vcpu, VMX_GUEST_GDTR_BASE, &value);
270        assert(!err);
271        MACHINE_STATE_READ(gs->machine.gdt_base, value);
272    }
273    return gs->machine.gdt_base;
274}
275
276static inline unsigned int vm_guest_state_get_gdt_limit(guest_state_t *gs, seL4_CPtr vcpu)
277{
278    int err;
279    unsigned int value;
280    if (IS_MACHINE_STATE_UNKNOWN(gs->machine.gdt_limit)) {
281        err = vm_vmcs_read(vcpu, VMX_GUEST_GDTR_LIMIT, &value);
282        assert(!err);
283        MACHINE_STATE_READ(gs->machine.gdt_limit, value);
284    }
285    return gs->machine.gdt_limit;
286}
287
288static inline unsigned int vm_guest_state_get_cs_selector(guest_state_t *gs, seL4_CPtr vcpu)
289{
290    int err;
291    unsigned int value;
292    if (IS_MACHINE_STATE_UNKNOWN(gs->machine.cs_selector)) {
293        err = vm_vmcs_read(vcpu, VMX_GUEST_CS_SELECTOR, &value);
294        assert(!err);
295        MACHINE_STATE_READ(gs->machine.cs_selector, value);
296    }
297    return gs->machine.cs_selector;
298}
299
300/* set */
/* Set the cached guest EIP. No dirty tracking: eip is always written to
 * the guest on VM entry. */
static inline void vm_guest_state_set_eip(guest_state_t *gs, unsigned int val)
{
    gs->machine.eip = val;
}
305
306static inline void vm_guest_state_set_cr0(guest_state_t *gs, unsigned int val)
307{
308    MACHINE_STATE_DIRTY(gs->machine.cr0);
309    gs->machine.cr0 = val;
310}
311
312static inline void vm_guest_state_set_cr3(guest_state_t *gs, unsigned int val)
313{
314    MACHINE_STATE_DIRTY(gs->machine.cr3);
315    gs->machine.cr3 = val;
316}
317
318static inline void vm_guest_state_set_cr4(guest_state_t *gs, unsigned int val)
319{
320    MACHINE_STATE_DIRTY(gs->machine.cr4);
321    gs->machine.cr4 = val;
322}
323
324static inline void vm_guest_state_set_rflags(guest_state_t *gs, unsigned int val)
325{
326    MACHINE_STATE_DIRTY(gs->machine.rflags);
327    gs->machine.rflags = val;
328}
329
/* Set the cached VM-entry controls. No dirty tracking: this is always
 * applied on VM entry. */
static inline void vm_guest_state_set_control_entry(guest_state_t *gs, unsigned int val)
{
    gs->machine.control_entry = val;
}
334
/* Set the cached primary processor-based controls. No dirty tracking:
 * this is always applied on VM entry. */
static inline void vm_guest_state_set_control_ppc(guest_state_t *gs, unsigned int val)
{
    gs->machine.control_ppc = val;
}
339
340static inline void vm_guest_state_set_idt_base(guest_state_t *gs, unsigned int val)
341{
342    MACHINE_STATE_DIRTY(gs->machine.idt_base);
343    gs->machine.idt_base = val;
344}
345
346static inline void vm_guest_state_set_idt_limit(guest_state_t *gs, unsigned int val)
347{
348    MACHINE_STATE_DIRTY(gs->machine.idt_limit);
349    gs->machine.idt_limit = val;
350}
351
352static inline void vm_guest_state_set_gdt_base(guest_state_t *gs, unsigned int val)
353{
354    MACHINE_STATE_DIRTY(gs->machine.gdt_base);
355    gs->machine.gdt_base = val;
356}
357
358static inline void vm_guest_state_set_gdt_limit(guest_state_t *gs, unsigned int val)
359{
360    MACHINE_STATE_DIRTY(gs->machine.gdt_limit);
361    gs->machine.gdt_limit = val;
362}
363
364static inline void vm_guest_state_set_cs_selector(guest_state_t *gs, unsigned int val)
365{
366    MACHINE_STATE_DIRTY(gs->machine.cs_selector);
367    gs->machine.cs_selector = val;
368}
369
370static inline void vm_guest_state_set_entry_exception_error_code(guest_state_t *gs, unsigned int val)
371{
372    MACHINE_STATE_DIRTY(gs->machine.entry_exception_error_code);
373    gs->machine.entry_exception_error_code = val;
374}
375
376/* sync */
377static inline void vm_guest_state_sync_cr0(guest_state_t *gs, seL4_CPtr vcpu)
378{
379    if (IS_MACHINE_STATE_MODIFIED(gs->machine.cr0)) {
380        int err = vm_vmcs_write(vcpu, VMX_GUEST_CR0, gs->machine.cr0);
381        assert(!err);
382        MACHINE_STATE_SYNC(gs->machine.cr0);
383    }
384}
385
386static inline void vm_guest_state_sync_cr3(guest_state_t *gs, seL4_CPtr vcpu)
387{
388    if (IS_MACHINE_STATE_MODIFIED(gs->machine.cr3)) {
389        int err = vm_vmcs_write(vcpu, VMX_GUEST_CR3, gs->machine.cr3);
390        assert(!err);
391        MACHINE_STATE_SYNC(gs->machine.cr3);
392    }
393}
394
395static inline void vm_guest_state_sync_cr4(guest_state_t *gs, seL4_CPtr vcpu)
396{
397    if (IS_MACHINE_STATE_MODIFIED(gs->machine.cr4)) {
398        int err = vm_vmcs_write(vcpu, VMX_GUEST_CR4, gs->machine.cr4);
399        assert(!err);
400        MACHINE_STATE_SYNC(gs->machine.cr4);
401    }
402}
403
404static inline void vm_guest_state_sync_rflags(guest_state_t *gs, seL4_CPtr vcpu)
405{
406    if (IS_MACHINE_STATE_MODIFIED(gs->machine.rflags)) {
407        int err = vm_vmcs_write(vcpu, VMX_GUEST_RFLAGS, gs->machine.rflags);
408        assert(!err);
409        MACHINE_STATE_SYNC(gs->machine.rflags);
410    }
411}
412
413static inline void vm_guest_state_sync_idt_base(guest_state_t *gs, seL4_CPtr vcpu)
414{
415    if (IS_MACHINE_STATE_MODIFIED(gs->machine.idt_base)) {
416        int err = vm_vmcs_write(vcpu, VMX_GUEST_IDTR_BASE, gs->machine.idt_base);
417        assert(!err);
418        MACHINE_STATE_SYNC(gs->machine.idt_base);
419    }
420}
421
422static inline void vm_guest_state_sync_idt_limit(guest_state_t *gs, seL4_CPtr vcpu)
423{
424    if (IS_MACHINE_STATE_MODIFIED(gs->machine.idt_limit)) {
425        int err = vm_vmcs_write(vcpu, VMX_GUEST_IDTR_LIMIT, gs->machine.idt_limit);
426        assert(!err);
427        MACHINE_STATE_SYNC(gs->machine.idt_limit);
428    }
429}
430
431static inline void vm_guest_state_sync_gdt_base(guest_state_t *gs, seL4_CPtr vcpu)
432{
433    if (IS_MACHINE_STATE_MODIFIED(gs->machine.gdt_base)) {
434        int err = vm_vmcs_write(vcpu, VMX_GUEST_GDTR_BASE, gs->machine.gdt_base);
435        assert(!err);
436        MACHINE_STATE_SYNC(gs->machine.gdt_base);
437    }
438}
439
440static inline void vm_guest_state_sync_gdt_limit(guest_state_t *gs, seL4_CPtr vcpu)
441{
442    if (IS_MACHINE_STATE_MODIFIED(gs->machine.gdt_limit)) {
443        int err = vm_vmcs_write(vcpu, VMX_GUEST_GDTR_LIMIT, gs->machine.gdt_limit);
444        assert(!err);
445        MACHINE_STATE_SYNC(gs->machine.gdt_limit);
446    }
447}
448
449static inline void vm_guest_state_sync_cs_selector(guest_state_t *gs, seL4_CPtr vcpu)
450{
451    if (IS_MACHINE_STATE_MODIFIED(gs->machine.cs_selector)) {
452        int err = vm_vmcs_write(vcpu, VMX_GUEST_CS_SELECTOR, gs->machine.cs_selector);
453        assert(!err);
454        MACHINE_STATE_SYNC(gs->machine.cs_selector);
455    }
456}
457
458static inline void vm_guest_state_sync_entry_exception_error_code(guest_state_t *gs, seL4_CPtr vcpu)
459{
460    if (IS_MACHINE_STATE_MODIFIED(gs->machine.entry_exception_error_code)) {
461        int err = vm_vmcs_write(vcpu, VMX_CONTROL_ENTRY_EXCEPTION_ERROR_CODE, gs->machine.entry_exception_error_code);
462        assert(!err);
463        MACHINE_STATE_SYNC(gs->machine.entry_exception_error_code);
464    }
465}
466
467static inline void vm_sync_guest_vmcs_state(vm_vcpu_t *vcpu)
468{
469    vm_guest_state_sync_cr0(vcpu->vcpu_arch.guest_state, vcpu->vcpu.cptr);
470    vm_guest_state_sync_cr3(vcpu->vcpu_arch.guest_state, vcpu->vcpu.cptr);
471    vm_guest_state_sync_cr4(vcpu->vcpu_arch.guest_state, vcpu->vcpu.cptr);
472    vm_guest_state_sync_rflags(vcpu->vcpu_arch.guest_state, vcpu->vcpu.cptr);
473    vm_guest_state_sync_idt_base(vcpu->vcpu_arch.guest_state, vcpu->vcpu.cptr);
474    vm_guest_state_sync_idt_limit(vcpu->vcpu_arch.guest_state, vcpu->vcpu.cptr);
475    vm_guest_state_sync_gdt_base(vcpu->vcpu_arch.guest_state, vcpu->vcpu.cptr);
476    vm_guest_state_sync_gdt_limit(vcpu->vcpu_arch.guest_state, vcpu->vcpu.cptr);
477    vm_guest_state_sync_cs_selector(vcpu->vcpu_arch.guest_state, vcpu->vcpu.cptr);
478    vm_guest_state_sync_entry_exception_error_code(vcpu->vcpu_arch.guest_state, vcpu->vcpu.cptr);
479}
480
481/**
482 * Sync a VCPU's current context state (seL4_VCPUContext)
483 * @param[in] vcpu      Handle to the vcpu
484 * @return              0 on success, otherwise -1 for error
485 */
486int vm_sync_guest_context(vm_vcpu_t *vcpu);
487
488/* Exit */
489static inline unsigned int vm_guest_exit_get_reason(guest_state_t *gs)
490{
491    assert(gs->exit.in_exit);
492    return gs->exit.reason;
493}
494
495static inline unsigned int vm_guest_exit_get_physical(guest_state_t *gs)
496{
497    assert(gs->exit.in_exit);
498    return gs->exit.guest_physical;
499}
500
501static inline unsigned int vm_guest_exit_get_int_len(guest_state_t *gs)
502{
503    assert(gs->exit.in_exit);
504    return gs->exit.instruction_length;
505}
506
507static inline unsigned int vm_guest_exit_get_qualification(guest_state_t *gs)
508{
509    assert(gs->exit.in_exit);
510    return gs->exit.qualification;
511}
512
513static inline void vm_guest_exit_next_instruction(guest_state_t *gs, seL4_CPtr vcpu)
514{
515    vm_guest_state_set_eip(gs, vm_guest_state_get_eip(gs) + vm_guest_exit_get_int_len(gs));
516}
517
518