1/**
2 * \file
3 */
4
5/*
6 * Copyright (c) 2009, ETH Zurich.
7 * All rights reserved.
8 *
9 * This file is distributed under the terms in the attached LICENSE file.
10 * If you do not find this file, copies can be found by writing to:
11 * ETH Zurich D-INFK, Universitaetstrasse 6, CH-8092 Zurich. Attn: Systems Group.
12 */
13
14#include <stdio.h> // for printf()
15#include "vmkitmon.h"
16#include <x86emu.h>
17#include <stdarg.h>
18#include "realmode.h"
19#ifdef CONFIG_SVM
20#include "svm.h"
21#endif
22#include "x86.h"
23
24static struct guest *env = NULL;
25static bool valid_exit = false;
26
27#ifndef CONFIG_SVM
28// Global variables are used, since the corresponding fields are read-only
29// in the VMCS.
30uint16_t saved_exit_reason;
31uint64_t saved_exit_qual, saved_rip;
32#endif
33
34#ifdef CONFIG_SVM
35static inline void
36set_vmcb_exit(amd_vmcb_t *vmcb, uint64_t code, uint64_t info1, uint64_t info2)
37{
38    amd_vmcb_exitcode_wr(vmcb, code);
39    amd_vmcb_exitinfo1_wr(vmcb, info1);
40    amd_vmcb_exitinfo2_wr(vmcb, info2);
41}
42#endif
43/* real mode interface functions */
44
// x86emu port-input hook (8-bit): no guest code we run performs INB yet,
// so reaching this handler is a hard error.
static uint8_t
io_inb (uint16_t port)
{
    (void)port;
    assert(!"inb not implemented");
    return 0;
}
51
// x86emu port-input hook (16-bit): unsupported, aborts when reached.
static uint16_t
io_inw (uint16_t port)
{
    (void)port;
    assert(!"inw not implemented");
    return 0;
}
58
// x86emu port-input hook (32-bit): unsupported, aborts when reached.
static uint32_t
io_inl (uint16_t port)
{
    (void)port;
    assert(!"inl not implemented");
    return 0;
}
65
66static void
67io_outb (uint16_t port, uint8_t val)
68{
69#ifdef CONFIG_SVM
70    uint32_t info1;
71
72    info1 = X86_IO_ACCESS_SZ8 | X86_IO_ACCESS_A16;
73    info1 |= port << 16;
74    set_vmcb_exit(&env->vmcb, SVM_VMEXIT_IOIO, info1, M.x86.R_EIP);
75#else
76    saved_exit_qual = 0x1 | (port << 16);
77    saved_rip = M.x86.R_EIP;
78    saved_exit_reason = VMX_EXIT_REASON_INOUT;
79#endif
80    valid_exit = true;
81
82    // move EIP back to the start of the instruction
83    M.x86.R_EIP -= 2;
84
85    HALT_SYS();
86}
87
// x86emu port-output hook (16-bit): unsupported, aborts when reached.
static void
io_outw (uint16_t port, uint16_t val)
{
    (void)port;
    (void)val;
    assert(!"outw not implemented");
}
93
// x86emu port-output hook (32-bit): unsupported, aborts when reached.
static void
io_outl (uint16_t port, uint32_t val)
{
    (void)port;
    (void)val;
    assert(!"outl not implemented");
}
99
/**
 * \brief x86emu interrupt hook, called for every INT/exception the
 *        emulated real-mode code raises.
 *
 * Software interrupts intercepted by the VMM are turned into a staged
 * VM-exit; every other case (any exception, or a non-intercepted SWINT on
 * SVM) aborts, since it is unsupported here.
 *
 * \param num interrupt vector number raised by the emulated code
 */
static void
int_handler (int num)
{
    // check whether the interrupt corresponds to an exception
    // in real-mode everything from 10 upwards is not an exception
    if (num < 10) {
        // exception raised
        // check whether this exception should be intercepted
#ifdef CONFIG_SVM
        if (amd_vmcb_exceptions_rd_raw(&env->vmcb) & (1 << num)) {
#else
	uint64_t excp_bmp;
	errval_t err = invoke_dispatcher_vmread(env->dcb_cap, VMX_EXCP_BMP, &excp_bmp);
	assert(err_is_ok(err));
	if (excp_bmp & (1 << num)) {
#endif
            // both branches abort: neither intercepted nor non-intercepted
            // real-mode exceptions can be forwarded yet
            assert(!"Intercepted exception raised");
        } else {
            assert(!"Realmode raised an exception which is not captured");
        }
    } else {
        // software interrupt raised
        // check whether we are interested in SW interrupts
#ifdef CONFIG_SVM
        if (amd_vmcb_intercepts_rd(&env->vmcb).intn == 1) {
            set_vmcb_exit(&env->vmcb, SVM_VMEXIT_SWINT, 0, 0);
            valid_exit = true;
        } else {
            assert(!"SWINT occured but not intercepted by the VMM");
        }
#else
	// NOTE(review): the VMX path stages the exit unconditionally, i.e.
	// it assumes SW interrupts are always intercepted -- verify
	saved_exit_reason = VMX_EXIT_REASON_SWINT;
	valid_exit = true;
#endif
    }

    // move EIP back to the start of the instruction
    // ('int imm8' is two bytes) so the monitor can re-decode it
    M.x86.R_EIP -= 2;

    HALT_SYS();
}
141
142/**
143 * \brief Initializes this module.
144 *
145 * Needs to be called before any other call to realmode functionality.
146 *
147 * \return Zero on success, non-zero on failure
148 */
149errval_t
150realmode_init (void)
151{
152    // initialize the io hooks
153    X86EMU_pioFuncs io_hooks = {
154        .inb = io_inb,
155        .inw = io_inw,
156        .inl = io_inl,
157        .outb = io_outb,
158        .outw = io_outw,
159        .outl = io_outl
160    };
161    X86EMU_setupPioFuncs(&io_hooks);
162
163    // initialize interrupt handers
164    X86EMU_intrFuncs int_hooks[256];
165    for (int i = 0; i < 256; i++) {
166        int_hooks[i] = int_handler;
167    }
168    X86EMU_setupIntrFuncs(int_hooks);
169
170    return SYS_ERR_OK;
171}
172
/**
 * \brief Load guest state into the x86emu virtual machine.
 *
 * Copies RIP/RSP/RFLAGS, the GPRs and the segment selectors from the
 * guest's VMCB (SVM) or VMCS (VMX) into the emulator state 'M', and points
 * the emulator's memory window at guest RAM.  Records the guest in the
 * module-local 'env'; must be paired with realmode_switch_from().
 *
 * \param g guest to emulate; no other guest may currently be switched in
 */
void
realmode_switch_to (struct guest *g)
{
    assert(g != NULL);
    assert(env == NULL);

    env = g;

    // sanity check: the emulator assumes guest RAM is mapped from 0 and
    // covers at least the full 1MB real-mode address space
    assert(g->mem_low_va == 0 && g->mem_high_va >= 0x100000);

    // copy the registers
#ifdef CONFIG_SVM
    M.x86.R_ESP = amd_vmcb_rsp_rd(&g->vmcb);
    M.x86.R_EIP = amd_vmcb_rip_rd(&g->vmcb);
    M.x86.R_EFLG = amd_vmcb_rflags_rd_raw(&g->vmcb);
    M.x86.R_EAX = amd_vmcb_rax_rd(&g->vmcb);
#else
    // VMX: individual vmreads; errors are accumulated in 'err' and only
    // checked once at the end of the VMX block below
    uint64_t guest_rsp;
    errval_t err = invoke_dispatcher_vmread(g->dcb_cap, VMX_GUEST_RSP, &guest_rsp);
    M.x86.R_ESP = guest_rsp;

    uint64_t guest_rip;
    err += invoke_dispatcher_vmread(g->dcb_cap, VMX_GUEST_RIP, &guest_rip);
    M.x86.R_EIP = guest_rip;

    uint64_t guest_rflags;
    err += invoke_dispatcher_vmread(g->dcb_cap, VMX_GUEST_RFLAGS, &guest_rflags);
    M.x86.R_EFLG = guest_rflags;
    M.x86.R_EAX = g->ctrl->regs.rax;
#endif
    M.x86.R_EBX = g->ctrl->regs.rbx;
    M.x86.R_ECX = g->ctrl->regs.rcx;
    M.x86.R_EDX = g->ctrl->regs.rdx;

    M.x86.R_EBP = g->ctrl->regs.rbp;
    M.x86.R_ESI = g->ctrl->regs.rsi;
    M.x86.R_EDI = g->ctrl->regs.rdi;

    // calculate the segment selector from the supplied base because the stored
    // selector might not point to the correct RM segment
    // (real-mode segment base == selector << 4)
#ifdef CONFIG_SVM
    M.x86.R_CS = amd_vmcb_cs_base_rd(&g->vmcb) >> 4;
    M.x86.R_DS = amd_vmcb_ds_base_rd(&g->vmcb) >> 4;
    M.x86.R_ES = amd_vmcb_es_base_rd(&g->vmcb) >> 4;
    M.x86.R_FS = amd_vmcb_fs_base_rd(&g->vmcb) >> 4;
    M.x86.R_GS = amd_vmcb_gs_base_rd(&g->vmcb) >> 4;
    M.x86.R_SS = amd_vmcb_ss_base_rd(&g->vmcb) >> 4;
#else
    uint64_t guest_cs_base;
    err += invoke_dispatcher_vmread(g->dcb_cap, VMX_GUEST_CS_BASE, &guest_cs_base);
    M.x86.R_CS = guest_cs_base >> 4;

    uint64_t guest_ds_base;
    err += invoke_dispatcher_vmread(g->dcb_cap, VMX_GUEST_DS_BASE, &guest_ds_base);
    M.x86.R_DS = guest_ds_base >> 4;

    uint64_t guest_es_base;
    err += invoke_dispatcher_vmread(g->dcb_cap, VMX_GUEST_ES_BASE, &guest_es_base);
    M.x86.R_ES = guest_es_base >> 4;

    uint64_t guest_fs_base;
    err += invoke_dispatcher_vmread(g->dcb_cap, VMX_GUEST_FS_BASE, &guest_fs_base);
    M.x86.R_FS = guest_fs_base >> 4;

    uint64_t guest_gs_base;
    err += invoke_dispatcher_vmread(g->dcb_cap, VMX_GUEST_GS_BASE, &guest_gs_base);
    M.x86.R_GS = guest_gs_base >> 4;

    uint64_t guest_ss_base;
    err += invoke_dispatcher_vmread(g->dcb_cap, VMX_GUEST_SS_BASE, &guest_ss_base);
    M.x86.R_SS = guest_ss_base >> 4;

    assert(err_is_ok(err));
#endif
    // copy memory location
    M.mem_base = guest_to_host(g->mem_low_va);
    if (g->a20_gate_enabled) {
        // add 1024 byte at the end if the a20 gate is enabled
        M.mem_size = 0x100400;
    } else {
        // without a20 gate we are supposed to have 1MB of memory
        M.mem_size = 0x100000;
    }
}
258
/**
 * \brief Write the x86emu machine state back into the guest.
 *
 * Inverse of realmode_switch_to(): copies RIP/RSP/RFLAGS and the GPRs back
 * into the guest's VMCB/VMCS, writes back only those segment registers that
 * changed during emulation, and releases the module-local 'env'.
 *
 * \param g guest that was previously switched in
 */
void
realmode_switch_from (struct guest *g)
{
    // save all state
#ifdef CONFIG_SVM
    amd_vmcb_rsp_wr(&g->vmcb, M.x86.R_ESP);
    amd_vmcb_rip_wr(&g->vmcb, M.x86.R_EIP);
    amd_vmcb_rflags_wr_raw(&g->vmcb, M.x86.R_EFLG);
    amd_vmcb_rax_wr(&g->vmcb, M.x86.R_EAX);
#else
    // VMX: errors are accumulated in 'err' and checked once at the end of
    // the VMX block below
    errval_t err = invoke_dispatcher_vmwrite(g->dcb_cap, VMX_GUEST_RSP, M.x86.R_ESP);
    err += invoke_dispatcher_vmwrite(g->dcb_cap, VMX_GUEST_RIP, M.x86.R_EIP);
    err += invoke_dispatcher_vmwrite(g->dcb_cap, VMX_GUEST_RFLAGS, M.x86.R_EFLG);
    g->ctrl->regs.rax = M.x86.R_EAX;
#endif
    g->ctrl->regs.rbx = M.x86.R_EBX;
    g->ctrl->regs.rcx = M.x86.R_ECX;
    g->ctrl->regs.rdx = M.x86.R_EDX;

    g->ctrl->regs.rbp = M.x86.R_EBP;
    g->ctrl->regs.rsi = M.x86.R_ESI;
    g->ctrl->regs.rdi = M.x86.R_EDI;

    // only copy the segments back if they were changed during execution
    // take the base as reference value because the selector might be invalid
    // (this happens e.g. in a switch from protected mode to real-mode, where
    // real-mode segment is read from the GDT)
    // FIXME: this is not 100% safe: If the code changes the seg selectors to
    //        same value as the initial one then it wont be captured here
#ifdef CONFIG_SVM
    if ((amd_vmcb_cs_base_rd(&g->vmcb) >> 4) != M.x86.R_CS) {
        VMCB_WRITE_SEGREG_REALMODE(&g->vmcb, cs, M.x86.R_CS);
    }
    if ((amd_vmcb_ds_base_rd(&g->vmcb) >> 4) != M.x86.R_DS) {
        VMCB_WRITE_SEGREG_REALMODE(&g->vmcb, ds, M.x86.R_DS);
    }
    if ((amd_vmcb_es_base_rd(&g->vmcb) >> 4) != M.x86.R_ES) {
        VMCB_WRITE_SEGREG_REALMODE(&g->vmcb, es, M.x86.R_ES);
    }
    if ((amd_vmcb_fs_base_rd(&g->vmcb) >> 4) != M.x86.R_FS) {
        VMCB_WRITE_SEGREG_REALMODE(&g->vmcb, fs, M.x86.R_FS);
    }
    if ((amd_vmcb_gs_base_rd(&g->vmcb) >> 4) != M.x86.R_GS) {
        VMCB_WRITE_SEGREG_REALMODE(&g->vmcb, gs, M.x86.R_GS);
    }
    if ((amd_vmcb_ss_base_rd(&g->vmcb) >> 4) != M.x86.R_SS) {
        VMCB_WRITE_SEGREG_REALMODE(&g->vmcb, ss, M.x86.R_SS);
    }
#else
    uint64_t guest_cs_base;
    err += invoke_dispatcher_vmread(g->dcb_cap, VMX_GUEST_CS_BASE, &guest_cs_base);
    if ((guest_cs_base >> 4) != M.x86.R_CS) {
        VMCS_WRITE_SEGREG_REALMODE(g->dcb_cap, CS, M.x86.R_CS);
    }
    uint64_t guest_ds_base;
    err += invoke_dispatcher_vmread(g->dcb_cap, VMX_GUEST_DS_BASE, &guest_ds_base);
    if ((guest_ds_base >> 4) != M.x86.R_DS) {
        VMCS_WRITE_SEGREG_REALMODE(g->dcb_cap, DS, M.x86.R_DS);
    }
    uint64_t guest_es_base;
    err += invoke_dispatcher_vmread(g->dcb_cap, VMX_GUEST_ES_BASE, &guest_es_base);
    if ((guest_es_base >> 4) != M.x86.R_ES) {
        VMCS_WRITE_SEGREG_REALMODE(g->dcb_cap, ES, M.x86.R_ES);
    }
    uint64_t guest_fs_base;
    err += invoke_dispatcher_vmread(g->dcb_cap, VMX_GUEST_FS_BASE, &guest_fs_base);
    if ((guest_fs_base >> 4) != M.x86.R_FS) {
        VMCS_WRITE_SEGREG_REALMODE(g->dcb_cap, FS, M.x86.R_FS);
    }
    uint64_t guest_gs_base;
    err += invoke_dispatcher_vmread(g->dcb_cap, VMX_GUEST_GS_BASE, &guest_gs_base);
    if ((guest_gs_base >> 4) != M.x86.R_GS) {
        VMCS_WRITE_SEGREG_REALMODE(g->dcb_cap, GS, M.x86.R_GS);
    }
    uint64_t guest_ss_base;
    err += invoke_dispatcher_vmread(g->dcb_cap, VMX_GUEST_SS_BASE, &guest_ss_base);
    if ((guest_ss_base >> 4) != M.x86.R_SS) {
        VMCS_WRITE_SEGREG_REALMODE(g->dcb_cap, SS, M.x86.R_SS);
    }
    assert(err_is_ok(err));
#endif
    // emulator is free for the next guest
    env = NULL;
}
342
343#ifndef CONFIG_SVM
344// Return true if the "Descriptor-table exiting" Secondary Processor-based control
345// is set in the VMCS, else false.
346static inline bool vmx_desc_table_exiting(void) {
347    uint64_t sec_proc_ctrls;
348    errval_t err = invoke_dispatcher_vmread(env->dcb_cap, VMX_EXEC_SEC_PROC, &sec_proc_ctrls);
349    assert(err_is_ok(err));
350    if (sec_proc_ctrls & SP_CLTS_DESC_TABLE) {
351        uint64_t prim_proc_ctrls;
352	err += invoke_dispatcher_vmread(env->dcb_cap, VMX_EXEC_PRIM_PROC, &prim_proc_ctrls);
353	assert(err_is_ok(err));
354	assert(prim_proc_ctrls & PP_CLTS_SEC_CTLS);
355	return true;
356    }
357    return false;
358}
359#endif
360
/**
 * \brief Run the x86 emulator on the current guest until an exit condition.
 *
 * Executes until a hook (port I/O, interrupt) stages a VM-exit, or the
 * emulator halts on an instruction it cannot handle.  In the latter case the
 * two-byte (0x0f-prefixed) instructions LGDT, LIDT, MOV to/from CR0 and
 * CPUID are decoded manually and converted into the corresponding staged
 * VM-exit; anything else aborts.
 *
 * \return REALMODE_ERR_OK when a VM-exit was staged
 */
int
realmode_exec (void)
{
    assert(env != NULL);

    valid_exit = false;

    // run the simulator; it returns once a hook called HALT_SYS()
    X86EMU_exec ();

    // examine halt reason
    // a hook already staged a VM-exit -> nothing left to decode
    if (valid_exit) {
        return REALMODE_ERR_OK;
    }

    // EIP has advanced past the two opcode bytes, so code[-2]/code[-1] are
    // the opcode bytes and code[0] is the ModRM byte
    uint8_t *code = (uint8_t *)(M.mem_base + (M.x86.R_CS << 4) + M.x86.R_EIP);

    // check for two byte operation (0x0f escape)
    if (code[-2] == 0x0f) {
        union x86_modrm mod;

        // move EIP back to the start of the instruction
        M.x86.R_EIP -= 2;
        mod.raw = code[0];

        // check for LGDT (0f 01 /2)
        if (code[-1] == 0x01 && mod.u.regop == 2) {
            // handle instruction
#ifdef CONFIG_SVM
            if (amd_vmcb_intercepts_rd(&env->vmcb).wrgdtr == 1) {
                set_vmcb_exit(&env->vmcb, SVM_VMEXIT_GDTR_WRITE, 0, 0);
#else
	    if (vmx_desc_table_exiting()) {
	        saved_exit_reason = VMX_EXIT_REASON_GDTR_IDTR;
#endif
                return REALMODE_ERR_OK;
            } else {
                assert(!"LGTR not intercepted");
            }
        }
        // check for LIDT (0f 01 /3)
        if (code[-1] == 0x01 && mod.u.regop == 3) {
#ifdef CONFIG_SVM
            if (amd_vmcb_intercepts_rd(&env->vmcb).wridtr == 1) {
                set_vmcb_exit(&env->vmcb, SVM_VMEXIT_IDTR_WRITE, 0, 0);
#else
	    if (vmx_desc_table_exiting()) {
	        saved_exit_reason = VMX_EXIT_REASON_GDTR_IDTR;
#endif
                return REALMODE_ERR_OK;
            } else {
                assert(!"LITR not intercepted");
            }
        }
        // check for CR access (0f 20 = MOV r32,CRn; 0f 22 = MOV CRn,r32)
        else if (code[-1] == 0x20 || code[-1] == 0x22) {
            if (mod.u.regop != 0) {
                assert(!"realmode: only access to CR0 are allowed atm");
            }
#ifndef CONFIG_SVM
	    // NOTE(review): mod2 is assigned but never used -- dead code?
	    union x86_modrm mod2;
	    mod2.raw = code[2];
#endif
            if (code[-1] == 0x20) { // check for read
#ifdef CONFIG_SVM
                if (amd_vmcb_cr_access_rd(&env->vmcb).rdcr0 == 1) {
                    set_vmcb_exit(&env->vmcb, SVM_VMEXIT_CR0_READ, 0, 0);
                    return REALMODE_ERR_OK;
                }
#else
		// exit qualification: access type "MOV from CR" (bit 4),
		// destination GPR in bits 11:8
		saved_exit_qual = (0x1 << 4) | (mod.u.rm << 8);
		saved_exit_reason = VMX_EXIT_REASON_CR_ACCESS;
		return REALMODE_ERR_OK;
#endif
            } else {
#ifdef CONFIG_SVM
                if (amd_vmcb_cr_access_rd(&env->vmcb).wrcr0 == 1) {
                    set_vmcb_exit(&env->vmcb, SVM_VMEXIT_CR0_WRITE, 0, 0);
                    return REALMODE_ERR_OK;
                }
#else
		// access type "MOV to CR" (0), source GPR in bits 11:8
		saved_exit_qual = mod.u.rm << 8;
		saved_exit_reason = VMX_EXIT_REASON_CR_ACCESS;

		return REALMODE_ERR_OK;
#endif
            }
            assert(!"CR0 access not intercepted");
        }
        // CPUID (0f a2)
        else if (code[-1] == 0xa2) {
#ifdef CONFIG_SVM
            set_vmcb_exit(&env->vmcb, SVM_VMEXIT_CPUID, 0, 0);
#else
	    saved_exit_reason = VMX_EXIT_REASON_CPUID;
#endif
            return REALMODE_ERR_OK;
        }
    }

    // NOTE(review): if the 0x0f branch above matched the prefix but no
    // instruction, EIP was already rolled back, so this prints EIP-4
    // relative to the faulting instruction -- diagnostic only
    printf("EIP: %08x, code %02x %02x %02x %02x\n", M.x86.R_EIP - 2,
           code[-2], code[-1], code[0], code[1]);
    assert(!"Unknown exit condition");
    return REALMODE_ERR_OK;
}
466
/* libx86emu relies on this function: forward its log output to stdout */
void printk(const char *fmt, ...)
{
    va_list args;
    va_start(args, fmt);
    vprintf(fmt, args);
    va_end(args);
}
476