1/**
2 * \file
 * \brief Before entry into a VM guest commences, the state of the logical
 *  processor and of the VMCS is checked to ensure that the following will
 *  transpire successfully:
5 *
6 *  - entry into the guest
7 *  - VMX non-root operation after entry
8 *  - exit from the guest
9 *
10 * The guest-state area, host-state area, and VMX controls are checked using these
11 * functions to guarantee that these tests pass.
12 */
13
14/*
15 * Copyright (c) 2014, University of Washington.
16 * All rights reserved.
17 *
18 * This file is distributed under the terms in the attached LICENSE file.
19 * If you do not find this file, copies can be found by writing to:
20 * ETH Zurich D-INFK, CAB F.78, Universitaetstrasse 6, CH-8092 Zurich.
21 * Attn: Systems Group.
22 */
23
24#include <kernel.h>
25#include <paging_kernel_arch.h>
26#include <vmkit.h>
27#include <vmx_checks.h>
28
29#include <dev/ia32_dev.h>
30
31static bool ia32e_guest, unrestricted_guest;
32
/**
 * \brief Returns true iff no bits of \p addr lie above the processor's
 *        supported physical-address width.
 */
static inline bool is_within_pa_width(uint64_t addr)
{
    uint64_t out_of_range_bits = addr & ~pa_width_mask();
    return out_of_range_bits == 0;
}
38
39static inline void check_guest_cr0(void)
40{
41    uint64_t pp_controls;
42    errval_t err = vmread(VMX_EXEC_PRIM_PROC, &pp_controls);
43
44    uint64_t cr0_fixed0 = ia32_vmx_cr0_fixed0_rd(NULL);
45    uint64_t cr0_fixed1 = ia32_vmx_cr0_fixed1_rd(NULL);
46
47    // CR0.NW and CR0.CD are not changed by VM entry, so are not checked
48    cr0_fixed0 &= ~(CR0_NW | CR0_CD);
49    if (unrestricted_guest) {
50        bool sec_ctls_set = sec_ctls_used(pp_controls);
51        assert(sec_ctls_set == true);
52	cr0_fixed0 &= ~(CR0_PE | CR0_PG);
53    }
54
55    uint64_t guest_cr0;
56    err += vmread(VMX_GUEST_CR0, &guest_cr0);
57    assert(err_is_ok(err));
58    assert(((guest_cr0 | cr0_fixed0) & cr0_fixed1) == guest_cr0);
59
60    if (guest_cr0 & CR0_PG) {
61        assert(guest_cr0 & CR0_PE);
62    }
63
64    if (ia32e_guest) {
65        assert(guest_cr0 & CR0_PG);
66    }
67}
68
69static inline void check_guest_cr3(void)
70{
71    uint64_t guest_cr3;
72    errval_t err = vmread(VMX_GUEST_CR3, &guest_cr3);
73    assert(err_is_ok(err));
74    bool within_pa_width = is_within_pa_width(guest_cr3);
75    assert(within_pa_width);
76}
77
78static inline void check_guest_cr4(void)
79{
80    uint64_t guest_cr4;
81    errval_t err = vmread(VMX_GUEST_CR4, &guest_cr4);
82    assert(err_is_ok(err));
83    uint32_t cr4_fixed0 = ia32_vmx_cr4_fixed0_rd(NULL);
84    uint32_t cr4_fixed1 = ia32_vmx_cr4_fixed1_rd(NULL);
85    assert(((guest_cr4 | cr4_fixed0) & cr4_fixed1) == guest_cr4);
86
87    if (ia32e_guest) {
88        assert(guest_cr4 & CR4_PAE);
89    } else {
90        assert((guest_cr4 & CR4_PCIDE) == 0);
91    }
92}
93
94static inline void check_guest_efer(void)
95{
96    uint64_t entry_controls;
97    errval_t err = vmread(VMX_ENTRY_CONTROLS, &entry_controls);
98
99    if (entry_controls & ENTRY_CLTS_LOAD_EFER) {
100        uint64_t guest_efer;
101	err += vmread(VMX_GUEST_EFER_F, &guest_efer);
102
103	// Bits reserved must be set to 0:
104
105	// Bits 1:7
106	assert(((guest_efer >> 1) & 0x7F) == 0);
107	// Bit 9
108	assert(((guest_efer >> 9) & 0x1) == 0);
109	// Bits 12:63
110	assert((guest_efer & ~0xFFF) == 0);
111
112        uint64_t guest_cr0;
113	err += vmread(VMX_GUEST_CR0, &guest_cr0);
114
115	bool lma_set = !!(guest_efer & EFER_LMA);
116	assert(lma_set == ia32e_guest);
117
118	if (guest_cr0 & CR0_PG) {
119	    bool lme_set = !!(guest_efer & EFER_LME);
120	    assert(lma_set == lme_set);
121	}
122    }
123    assert(err_is_ok(err));
124}
125
// Runs all guest control-register checks (CR0, CR3, CR4).
static void check_guest_control_registers(void)
{
    check_guest_cr0();
    check_guest_cr3();
    check_guest_cr4();
}
132
133static void check_guest_seg_sel(void)
134{
135    // TR
136    uint64_t tr_sel;
137    errval_t err = vmread(VMX_GUEST_TR_SEL, &tr_sel);
138    assert((tr_sel & SEL_TI) == 0);
139
140    // LDTR
141    uint64_t ldtr_access_rights, ldtr_sel;
142    err += vmread(VMX_GUEST_LDTR_ACCESS, &ldtr_access_rights);
143    err += vmread(VMX_GUEST_LDTR_SEL, &ldtr_sel);
144    if (seg_reg_usable(ldtr_access_rights)) {
145        assert((ldtr_sel & SEL_TI) == 0);
146    }
147
148    // SS
149    // The guest will not be in virtual-8086 mode
150    if (!unrestricted_guest) {
151        uint64_t ss_sel, cs_sel;
152	err += vmread(VMX_GUEST_SS_SEL, &ss_sel);
153	err += vmread(VMX_GUEST_CS_SEL, &cs_sel);
154        int ss_rpl = (ss_sel & SEL_RPL);
155	int cs_rpl = (cs_sel & SEL_RPL);
156	assert(ss_rpl == cs_rpl);
157    }
158    assert(err_is_ok(err));
159}
160
161static void check_guest_seg_base(void)
162{
163    // TR, FS, GS
164    uint64_t tr_base, fs_base, gs_base;
165    errval_t err = vmread(VMX_GUEST_TR_BASE, &tr_base);
166    err += vmread(VMX_GUEST_FS_BASE, &fs_base);
167    err += vmread(VMX_GUEST_GS_BASE, &gs_base);
168    bool tr_base_canonical = is_canonical(tr_base);
169    assert(tr_base_canonical);
170
171    bool fs_base_canonical = is_canonical(fs_base);
172    assert(fs_base_canonical);
173
174    bool gs_base_canonical = is_canonical(gs_base);
175    assert(gs_base_canonical);
176
177    // LDTR
178    uint64_t ldtr_access_rights;
179    err += vmread(VMX_GUEST_LDTR_ACCESS, &ldtr_access_rights);
180    if (seg_reg_usable(ldtr_access_rights)) {
181        uint64_t ldtr_base;
182	err += vmread(VMX_GUEST_LDTR_BASE, &ldtr_base);
183	bool ldtr_base_canonical = is_canonical(ldtr_base);
184	assert(ldtr_base_canonical);
185    }
186
187    // CS
188    uint64_t cs_base;
189    err += vmread(VMX_GUEST_CS_BASE, &cs_base);
190    assert((cs_base & ~0xFFFFFFFF) == 0);
191
192    // SS, DS, ES
193    uint64_t ss_access_rights, ds_access_rights, es_access_rights;
194    err += vmread(VMX_GUEST_SS_ACCESS, &ss_access_rights);
195    err += vmread(VMX_GUEST_DS_ACCESS, &ds_access_rights);
196    err += vmread(VMX_GUEST_ES_ACCESS, &es_access_rights);
197
198    if (seg_reg_usable(ss_access_rights)) {
199        uint64_t ss_base;
200	err += vmread(VMX_GUEST_SS_BASE, &ss_base);
201	bool ss_base_canonical = is_canonical(ss_base);
202	assert(ss_base_canonical);
203    }
204
205    if (seg_reg_usable(ds_access_rights)) {
206        uint64_t ds_base;
207	err += vmread(VMX_GUEST_DS_BASE, &ds_base);
208	bool ds_base_canonical = is_canonical(ds_base);
209	assert(ds_base_canonical);
210    }
211
212    if (seg_reg_usable(es_access_rights)) {
213        uint64_t es_base;
214	err += vmread(VMX_GUEST_ES_BASE, &es_base);
215	bool es_base_canonical = is_canonical(es_base);
216	assert(es_base_canonical);
217    }
218    assert(err_is_ok(err));
219}
220
221static void check_guest_seg_access_type(void)
222{
223    // CS
224    uint64_t cs_access_rights;
225    errval_t err = vmread(VMX_GUEST_CS_ACCESS, &cs_access_rights);
226    int cs_type = seg_access_type(cs_access_rights);
227    if (unrestricted_guest) {
228        assert(cs_type == 3 || cs_type == 9 || cs_type == 11 ||
229	       cs_type == 13 || cs_type == 15);
230    } else {
231        assert(cs_type == 9 || cs_type == 11 || cs_type == 13 ||
232	       cs_type == 15);
233    }
234
235    // SS
236    uint64_t ss_access_rights;
237    err += vmread(VMX_GUEST_SS_ACCESS, &ss_access_rights);
238    if (seg_reg_usable(ss_access_rights)) {
239        int ss_type = seg_access_type(ss_access_rights);
240	assert(ss_type == 3 || ss_type == 7);
241    }
242
243    // DS
244    uint64_t ds_access_rights;
245    err += vmread(VMX_GUEST_DS_ACCESS, &ds_access_rights);
246    if (seg_reg_usable(ds_access_rights)) {
247        int ds_type = seg_access_type(ds_access_rights);
248	assert(ds_type & SEG_TYPE_ACCESSED);
249	if (ds_type & SEG_TYPE_CODE_SEGMENT) {
250	    assert(ds_type & SEG_TYPE_READABLE);
251	}
252    }
253
254    // ES
255    uint64_t es_access_rights;
256    err += vmread(VMX_GUEST_ES_ACCESS, &es_access_rights);
257    if (seg_reg_usable(es_access_rights)) {
258        int es_type = seg_access_type(es_access_rights);
259	assert(es_type & SEG_TYPE_ACCESSED);
260	if (es_type & SEG_TYPE_CODE_SEGMENT) {
261	    assert(es_type & SEG_TYPE_READABLE);
262	}
263    }
264
265    // FS
266    uint64_t fs_access_rights;
267    err += vmread(VMX_GUEST_FS_ACCESS, &fs_access_rights);
268    if (seg_reg_usable(fs_access_rights)) {
269        int fs_type = seg_access_type(fs_access_rights);
270	assert(fs_type & SEG_TYPE_ACCESSED);
271	if (fs_type & SEG_TYPE_CODE_SEGMENT) {
272	    assert(fs_type & SEG_TYPE_READABLE);
273	}
274    }
275
276    // GS
277    uint64_t gs_access_rights;
278    err += vmread(VMX_GUEST_GS_ACCESS, &gs_access_rights);
279    if (seg_reg_usable(gs_access_rights)) {
280        int gs_type = seg_access_type(gs_access_rights);
281	assert(gs_type & SEG_TYPE_ACCESSED);
282	if (gs_type & SEG_TYPE_CODE_SEGMENT) {
283	    assert(gs_type & SEG_TYPE_READABLE);
284	}
285    }
286
287    // TR
288    uint64_t tr_access_rights;
289    err += vmread(VMX_GUEST_TR_ACCESS, &tr_access_rights);
290    int tr_type = seg_access_type(tr_access_rights);
291    if (ia32e_guest) {
292        assert(tr_type == 11);
293    } else {
294        assert(tr_type == 3 || tr_type == 11);
295    }
296
297    // LDTR
298    uint64_t ldtr_access_rights;
299    err += vmread(VMX_GUEST_LDTR_ACCESS, &ldtr_access_rights);
300    if (seg_reg_usable(ldtr_access_rights)) {
301        int ldtr_type = seg_access_type(ldtr_access_rights);
302        assert(ldtr_type == 2);
303    }
304    assert(err_is_ok(err));
305}
306
307static void check_guest_seg_access_s(void)
308{
309    // CS
310    uint64_t cs_access_rights;
311    errval_t err = vmread(VMX_GUEST_CS_ACCESS, &cs_access_rights);
312    int cs_s = seg_access_s(cs_access_rights);
313    assert(cs_s == 1);
314
315    // SS
316    uint64_t ss_access_rights;
317    err += vmread(VMX_GUEST_SS_ACCESS, &ss_access_rights);
318    if (seg_reg_usable(ss_access_rights)) {
319        int ss_s = seg_access_s(ss_access_rights);
320	assert(ss_s == 1);
321    }
322
323    // DS
324    uint64_t ds_access_rights;
325    err += vmread(VMX_GUEST_DS_ACCESS, &ds_access_rights);
326    if (seg_reg_usable(ds_access_rights)) {
327        int ds_s = seg_access_s(ds_access_rights);
328	assert(ds_s == 1);
329    }
330
331    // ES
332    uint64_t es_access_rights;
333    err += vmread(VMX_GUEST_ES_ACCESS, &es_access_rights);
334    if (seg_reg_usable(es_access_rights)) {
335        int es_s = seg_access_s(es_access_rights);
336	assert(es_s == 1);
337    }
338
339    // FS
340    uint64_t fs_access_rights;
341    err += vmread(VMX_GUEST_FS_ACCESS, &fs_access_rights);
342    if (seg_reg_usable(fs_access_rights)) {
343        int fs_s = seg_access_s(fs_access_rights);
344	assert(fs_s == 1);
345    }
346
347    // GS
348    uint64_t gs_access_rights;
349    err += vmread(VMX_GUEST_GS_ACCESS, &gs_access_rights);
350    if (seg_reg_usable(gs_access_rights)) {
351        int gs_s = seg_access_s(gs_access_rights);
352	assert(gs_s == 1);
353    }
354
355    // TR
356    uint64_t tr_access_rights;
357    err += vmread(VMX_GUEST_TR_ACCESS, &tr_access_rights);
358    int tr_s = seg_access_s(tr_access_rights);
359    assert(tr_s == 0);
360
361    // LDTR
362    uint64_t ldtr_access_rights;
363    err += vmread(VMX_GUEST_LDTR_ACCESS, &ldtr_access_rights);
364    if (seg_reg_usable(ldtr_access_rights)) {
365	int ldtr_s = seg_access_s(ldtr_access_rights);
366	assert(ldtr_s == 0);
367    }
368    assert(err_is_ok(err));
369}
370
371static void check_guest_seg_access_dpl(void)
372{
373    // CS
374    uint64_t cs_access_rights;
375    errval_t err = vmread(VMX_GUEST_CS_ACCESS, &cs_access_rights);
376    int cs_dpl = seg_access_dpl(cs_access_rights);
377
378    int cs_type = seg_access_type(cs_access_rights);
379    if (cs_type == 3) {
380	assert(unrestricted_guest);
381	assert(cs_dpl == 0);
382    }
383
384    uint64_t ss_access_rights;
385    err += vmread(VMX_GUEST_SS_ACCESS, &ss_access_rights);
386    int ss_dpl = seg_access_dpl(ss_access_rights);
387
388    if (cs_type == 9 || cs_type == 11) {
389        assert(cs_dpl == ss_dpl);
390    }
391
392    if (cs_type == 13 || cs_type == 15) {
393        assert(cs_dpl <= ss_dpl);
394    }
395
396    // SS
397    if (!unrestricted_guest) {
398        uint64_t ss_sel;
399	err += vmread(VMX_GUEST_SS_SEL, &ss_sel);
400	int ss_rpl = (ss_sel & SEL_RPL);
401	assert(ss_rpl == ss_dpl);
402    }
403
404    uint64_t guest_cr0;
405    err += vmread(VMX_GUEST_CR0, &guest_cr0);
406
407    if (cs_type == 3 || (guest_cr0 & CR0_PE) == 0) {
408        assert(ss_dpl == 0);
409    }
410
411    // DS
412    uint64_t ds_access_rights;
413    err += vmread(VMX_GUEST_DS_ACCESS, &ds_access_rights);
414    bool ds_usable = seg_reg_usable(ds_access_rights);
415    int ds_type = seg_access_type(ds_access_rights);
416
417    if (!unrestricted_guest && ds_usable && (0 <= ds_type && ds_type <= 11)) {
418        uint64_t ds_sel;
419	err += vmread(VMX_GUEST_DS_SEL, &ds_sel);
420	int ds_rpl = (ds_sel & SEL_RPL);
421	int ds_dpl = seg_access_dpl(ds_access_rights);
422
423	assert(ds_dpl >= ds_rpl);
424    }
425
426    // ES
427    uint64_t es_access_rights;
428    err += vmread(VMX_GUEST_DS_ACCESS, &es_access_rights);
429    bool es_usable = seg_reg_usable(es_access_rights);
430    int es_type = seg_access_type(es_access_rights);
431
432    if (!unrestricted_guest && es_usable && (0 <= es_type && es_type <= 11)) {
433        uint64_t es_sel;
434	err += vmread(VMX_GUEST_ES_SEL, &es_sel);
435	int es_rpl = (es_sel & SEL_RPL);
436	int es_dpl = seg_access_dpl(es_access_rights);
437
438	assert(es_dpl >= es_rpl);
439    }
440
441    // FS
442    uint64_t fs_access_rights;
443    err += vmread(VMX_GUEST_FS_ACCESS, &fs_access_rights);
444    bool fs_usable = seg_reg_usable(fs_access_rights);
445    int fs_type = seg_access_type(fs_access_rights);
446
447    if (!unrestricted_guest && fs_usable && (0 <= fs_type && fs_type <= 11)) {
448        uint64_t fs_sel;
449	err += vmread(VMX_GUEST_FS_SEL, &fs_sel);
450	int fs_rpl = (fs_sel & SEL_RPL);
451	int fs_dpl = seg_access_dpl(fs_access_rights);
452
453	assert(fs_dpl >= fs_rpl);
454    }
455
456    // GS
457    uint64_t gs_access_rights;
458    err += vmread(VMX_GUEST_GS_ACCESS, &gs_access_rights);
459    bool gs_usable = seg_reg_usable(gs_access_rights);
460    int gs_type = seg_access_type(gs_access_rights);
461
462    if (!unrestricted_guest && gs_usable && (0 <= gs_type && gs_type <= 11)) {
463        uint64_t gs_sel;
464	err += vmread(VMX_GUEST_GS_SEL, &gs_sel);
465	int gs_rpl = (gs_sel & SEL_RPL);
466	int gs_dpl = seg_access_dpl(gs_access_rights);
467
468	assert(gs_dpl >= gs_rpl);
469    }
470    assert(err_is_ok(err));
471}
472
473static void check_guest_seg_access_p(void)
474{
475    // CS
476    uint64_t cs_access_rights;
477    errval_t err = vmread(VMX_GUEST_CS_ACCESS, &cs_access_rights);
478    int cs_p = seg_access_p(cs_access_rights);
479    assert(cs_p == 1);
480
481    // SS
482    uint64_t ss_access_rights;
483    err += vmread(VMX_GUEST_SS_ACCESS, &ss_access_rights);
484    if (seg_reg_usable(ss_access_rights)) {
485        int ss_p = seg_access_p(ss_access_rights);
486	assert(ss_p == 1);
487    }
488
489    // DS
490    uint64_t ds_access_rights;
491    err += vmread(VMX_GUEST_DS_ACCESS, &ds_access_rights);
492    if (seg_reg_usable(ds_access_rights)) {
493        int ds_p = seg_access_p(ds_access_rights);
494	assert(ds_p == 1);
495    }
496
497    // ES
498    uint64_t es_access_rights;
499    err += vmread(VMX_GUEST_ES_ACCESS, &es_access_rights);
500    if (seg_reg_usable(es_access_rights)) {
501        int es_p = seg_access_p(es_access_rights);
502	assert(es_p == 1);
503    }
504
505    // FS
506    uint64_t fs_access_rights;
507    err += vmread(VMX_GUEST_FS_ACCESS, &fs_access_rights);
508    if (seg_reg_usable(fs_access_rights)) {
509        int fs_p = seg_access_p(fs_access_rights);
510	assert(fs_p == 1);
511    }
512
513    // GS
514    uint64_t gs_access_rights;
515    err += vmread(VMX_GUEST_GS_ACCESS, &gs_access_rights);
516    if (seg_reg_usable(gs_access_rights)) {
517        int gs_p = seg_access_p(gs_access_rights);
518	assert(gs_p == 1);
519    }
520
521    // TR
522    uint64_t tr_access_rights;
523    err += vmread(VMX_GUEST_TR_ACCESS, &tr_access_rights);
524    int tr_p = seg_access_p(tr_access_rights);
525    assert(tr_p == 1);
526
527    // LDTR
528    uint64_t ldtr_access_rights;
529    err += vmread(VMX_GUEST_LDTR_ACCESS, &ldtr_access_rights);
530    if (seg_reg_usable(ldtr_access_rights)) {
531	int ldtr_p = seg_access_p(ldtr_access_rights);
532	assert(ldtr_p == 1);
533    }
534    assert(err_is_ok(err));
535}
536
537static void check_guest_seg_access_db(void)
538{
539    // CS
540    uint64_t cs_access_rights;
541    errval_t err = vmread(VMX_GUEST_CS_ACCESS, &cs_access_rights);
542    assert(err_is_ok(err));
543    int cs_l = seg_access_l(cs_access_rights);
544    if (ia32e_guest && (cs_l == 1)) {
545        int cs_db = seg_access_db(cs_access_rights);
546	assert(cs_db == 0);
547    }
548}
549
550static void check_guest_seg_access_g(void)
551{
552    // CS
553    uint64_t cs_access_rights;
554    errval_t err = vmread(VMX_GUEST_CS_ACCESS, &cs_access_rights);
555    int cs_g = seg_access_g(cs_access_rights);
556
557    uint64_t cs_lim;
558    err += vmread(VMX_GUEST_CS_LIM, &cs_lim);
559    // If there is at least one bit in the range 0:11 that is set to 0
560    if ((cs_lim & 0xFFF) ^ 0xFFF) {
561        assert(cs_g == 0);
562    }
563    // If there is at least one bit in the range 20:31 that is set to 1
564    if ((cs_lim >> 20) & 0xFFF) {
565        assert(cs_g == 1);
566    }
567
568    // SS
569    uint64_t ss_access_rights;
570    err += vmread(VMX_GUEST_SS_ACCESS, &ss_access_rights);
571    if (seg_reg_usable(ss_access_rights)) {
572        int ss_g = seg_access_g(ss_access_rights);
573	uint64_t ss_lim;
574	err += vmread(VMX_GUEST_SS_LIM, &ss_lim);
575	// If there is at least one bit in the range 0:11 that is set to 0
576	if ((ss_lim & 0xFFF) ^ 0xFFF) {
577	    assert(ss_g == 0);
578	}
579	// If there is at least one bit in the range 20:31 that is set to 1
580	if ((ss_lim >> 20) & 0xFFF) {
581	    assert(ss_g == 1);
582	}
583    }
584
585    // DS
586    uint64_t ds_access_rights;
587    err += vmread(VMX_GUEST_DS_ACCESS, &ds_access_rights);
588    if (seg_reg_usable(ds_access_rights)) {
589        int ds_g = seg_access_g(ds_access_rights);
590	uint64_t ds_lim;
591	err += vmread(VMX_GUEST_DS_LIM, &ds_lim);
592	// If there is at least one bit in the range 0:11 that is set to 0
593	if ((ds_lim & 0xFFF) ^ 0xFFF) {
594	    assert(ds_g == 0);
595	}
596	// If there is at least one bit in the range 20:31 that is set to 1
597	if ((ds_lim >> 20) & 0xFFF) {
598	    assert(ds_g == 1);
599	}
600    }
601
602    // ES
603    uint64_t es_access_rights;
604    err += vmread(VMX_GUEST_ES_ACCESS, &es_access_rights);
605    if (seg_reg_usable(es_access_rights)) {
606        int es_g = seg_access_g(es_access_rights);
607	uint64_t es_lim;
608	err += vmread(VMX_GUEST_ES_LIM, &es_lim);
609	// If there is at least one bit in the range 0:11 that is set to 0
610	if ((es_lim & 0xFFF) ^ 0xFFF) {
611	    assert(es_g == 0);
612	}
613	// If there is at least one bit in the range 20:31 that is set to 1
614	if ((es_lim >> 20) & 0xFFF) {
615	    assert(es_g == 1);
616	}
617    }
618
619    // FS
620    uint64_t fs_access_rights;
621    err += vmread(VMX_GUEST_FS_ACCESS, &fs_access_rights);
622    if (seg_reg_usable(fs_access_rights)) {
623        int fs_g = seg_access_g(fs_access_rights);
624	uint64_t fs_lim;
625	err += vmread(VMX_GUEST_FS_LIM, &fs_lim);
626	// If there is at least one bit in the range 0:11 that is set to 0
627	if ((fs_lim & 0xFFF) ^ 0xFFF) {
628	    assert(fs_g == 0);
629	}
630	// If there is at least one bit in the range 20:31 that is set to 1
631	if ((fs_lim >> 20) & 0xFFF) {
632	    assert(fs_g == 1);
633	}
634    }
635
636    // GS
637    uint64_t gs_access_rights;
638    err += vmread(VMX_GUEST_GS_ACCESS, &gs_access_rights);
639    if (seg_reg_usable(gs_access_rights)) {
640        int gs_g = seg_access_g(gs_access_rights);
641	uint64_t gs_lim;
642	err += vmread(VMX_GUEST_GS_LIM, &gs_lim);
643	// If there is at least one bit in the range 0:11 that is set to 0
644	if ((gs_lim & 0xFFF) ^ 0xFFF) {
645	    assert(gs_g == 0);
646	}
647	// If there is at least one bit in the range 20:31 that is set to 1
648	if ((gs_lim >> 20) & 0xFFF) {
649	    assert(gs_g == 1);
650	}
651    }
652
653    // TR
654    uint64_t tr_access_rights;
655    err += vmread(VMX_GUEST_TR_ACCESS, &tr_access_rights);
656    if (seg_reg_usable(tr_access_rights)) {
657        int tr_g = seg_access_g(tr_access_rights);
658	uint64_t tr_lim;
659	err += vmread(VMX_GUEST_TR_LIM, &tr_lim);
660	// If there is at least one bit in the range 0:11 that is set to 0
661	if ((tr_lim & 0xFFF) ^ 0xFFF) {
662	    assert(tr_g == 0);
663	}
664	// If there is at least one bit in the range 20:31 that is set to 1
665	if ((tr_lim >> 20) & 0xFFF) {
666	    assert(tr_g == 1);
667	}
668    }
669
670    // LDTR
671    uint64_t ldtr_access_rights;
672    err += vmread(VMX_GUEST_LDTR_ACCESS, &ldtr_access_rights);
673    if (seg_reg_usable(ldtr_access_rights)) {
674        int ldtr_g = seg_access_g(ldtr_access_rights);
675	uint64_t ldtr_lim;
676	err += vmread(VMX_GUEST_LDTR_LIM, &ldtr_lim);
677	// If there is at least one bit in the range 0:11 that is set to 0
678	if ((ldtr_lim & 0xFFF) ^ 0xFFF) {
679	    assert(ldtr_g == 0);
680	}
681	// If there is at least one bit in the range 20:31 that is set to 1
682	if ((ldtr_lim >> 20) & 0xFFF) {
683	    assert(ldtr_g == 1);
684	}
685    }
686    assert(err_is_ok(err));
687}
688
689static void check_guest_seg_access_rsvd(void)
690{
691    // CS
692    uint64_t cs_access_rights;
693    errval_t err = vmread(VMX_GUEST_CS_ACCESS, &cs_access_rights);
694    int cs_rsvd_low = ((cs_access_rights >> 8) & 0xF);
695    int cs_rsvd_high = ((cs_access_rights >> 17) & 0x7FFF);
696    assert(cs_rsvd_low == 0);
697    assert(cs_rsvd_high == 0);
698
699    // SS
700    uint64_t ss_access_rights;
701    err += vmread(VMX_GUEST_SS_ACCESS, &ss_access_rights);
702    if (seg_reg_usable(ss_access_rights)) {
703        int ss_rsvd_low = ((ss_access_rights >> 8) & 0xF);
704	int ss_rsvd_high = ((ss_access_rights >> 17) & 0x7FFF);
705	assert(ss_rsvd_low == 0);
706	assert(ss_rsvd_high == 0);
707    }
708
709    // DS
710    uint64_t ds_access_rights;
711    err += vmread(VMX_GUEST_DS_ACCESS, &ds_access_rights);
712    if (seg_reg_usable(ds_access_rights)) {
713        int ds_rsvd_low = ((ds_access_rights >> 8) & 0xF);
714	int ds_rsvd_high = ((ds_access_rights >> 17) & 0x7FFF);
715	assert(ds_rsvd_low == 0);
716	assert(ds_rsvd_high == 0);
717    }
718
719    // ES
720    uint64_t es_access_rights;
721    err += vmread(VMX_GUEST_ES_ACCESS, &es_access_rights);
722    if (seg_reg_usable(es_access_rights)) {
723        int es_rsvd_low = ((es_access_rights >> 8) & 0xF);
724	int es_rsvd_high = ((es_access_rights >> 17) & 0x7FFF);
725	assert(es_rsvd_low == 0);
726	assert(es_rsvd_high == 0);
727    }
728
729    // FS
730    uint64_t fs_access_rights;
731    err += vmread(VMX_GUEST_FS_ACCESS, &fs_access_rights);
732    if (seg_reg_usable(fs_access_rights)) {
733        int fs_rsvd_low = ((fs_access_rights >> 8) & 0xF);
734	int fs_rsvd_high = ((fs_access_rights >> 17) & 0x7FFF);
735	assert(fs_rsvd_low == 0);
736	assert(fs_rsvd_high == 0);
737    }
738
739    // GS
740    uint64_t gs_access_rights;
741    err += vmread(VMX_GUEST_GS_ACCESS, &gs_access_rights);
742    if (seg_reg_usable(gs_access_rights)) {
743        int gs_rsvd_low = ((gs_access_rights >> 8) & 0xF);
744	int gs_rsvd_high = ((gs_access_rights >> 17) & 0x7FFF);
745	assert(gs_rsvd_low == 0);
746	assert(gs_rsvd_high == 0);
747    }
748
749    // TR
750    uint64_t tr_access_rights;
751    err += vmread(VMX_GUEST_TR_ACCESS, &tr_access_rights);
752    int tr_rsvd_low = ((tr_access_rights >> 8) & 0xF);
753    int tr_rsvd_high = ((tr_access_rights >> 17) & 0x7FFF);
754    assert(tr_rsvd_low == 0);
755    assert(tr_rsvd_high == 0);
756
757    // LDTR
758    uint64_t ldtr_access_rights;
759    err += vmread(VMX_GUEST_LDTR_ACCESS, &ldtr_access_rights);
760    if (seg_reg_usable(ldtr_access_rights)) {
761	int ldtr_rsvd_low = ((ldtr_access_rights >> 8) & 0xF);
762	int ldtr_rsvd_high = ((ldtr_access_rights >> 17) & 0x7FFF);
763	assert(ldtr_rsvd_low == 0);
764	assert(ldtr_rsvd_high == 0);
765    }
766    assert(err_is_ok(err));
767}
768
// Runs all checks on the guest segment access-rights fields.
// The guest will not be virtual-8086.
static void check_guest_seg_access_rights(void)
{
    // Each helper validates one field group of the access rights
    check_guest_seg_access_type();
    check_guest_seg_access_s();
    check_guest_seg_access_dpl();
    check_guest_seg_access_p();
    check_guest_seg_access_db();
    check_guest_seg_access_g();
    check_guest_seg_access_rsvd();
}
780
// Runs all guest segment-register checks: selectors, base addresses,
// and access rights.
static void check_guest_seg_regs(void)
{
    check_guest_seg_sel();
    check_guest_seg_base();
    check_guest_seg_access_rights();
}
787
788static void check_guest_desc_table_regs(void)
789{
790    uint64_t gdtr_base, idtr_base;
791    errval_t err = vmread(VMX_GUEST_GDTR_BASE, &gdtr_base);
792    err += vmread(VMX_GUEST_IDTR_BASE, &idtr_base);
793    bool gdtr_base_canonical = is_canonical(gdtr_base);
794    assert(gdtr_base_canonical);
795
796    bool idtr_base_canonical = is_canonical(idtr_base);
797    assert(idtr_base_canonical);
798
799    uint64_t gdtr_lim, idtr_lim;
800    err += vmread(VMX_GUEST_GDTR_LIM, &gdtr_lim);
801    err += vmread(VMX_GUEST_IDTR_LIM, &idtr_lim);
802    assert(err_is_ok(err));
803    assert((gdtr_lim & 0xFFFF0000) == 0);
804    assert((idtr_lim & 0xFFFF0000) == 0);
805}
806
807static void check_guest_rip(void)
808{
809    uint64_t guest_rip;
810    errval_t err = vmread(VMX_GUEST_RIP, &guest_rip);
811
812    // Guest RIP
813    uint64_t cs_access_rights;
814    err += vmread(VMX_GUEST_CS_ACCESS, &cs_access_rights);
815    int cs_l = seg_access_l(cs_access_rights);
816    if ((ia32e_guest == false) || (cs_l == 0)) {
817        assert((guest_rip & ~0xFFFFFFFF) == 0);
818    } else {
819      // bits N:63 must be identical, if the process supports N < 64
820      // linear-address bits
821        assert((guest_rip & ~pa_width_mask()) == 0 ||
822	       ~(guest_rip & ~pa_width_mask()) == pa_width_mask());
823    }
824    assert(err_is_ok(err));
825}
826
827static void check_guest_rflags(void)
828{
829    // Guest RFLAGS
830    uint64_t guest_rflags;
831    errval_t err = vmread(VMX_GUEST_RFLAGS, &guest_rflags);
832
833    bool virtual_8086_guest = !!(guest_rflags & RFLAGS_VM);
834    assert(virtual_8086_guest == false);
835
836    // Reserved bits 22:63 must be zero
837    assert((guest_rflags & ~0x3FFFFF) == 0);
838
839    // Bits 3, 5, and 15 must be zero
840    assert((guest_rflags & (1 << 3UL)) == 0);
841    assert((guest_rflags & (1 << 5UL)) == 0);
842    assert((guest_rflags & (1 << 15UL)) == 0);
843
844    // Reserved bit 1 must be 1
845    assert(guest_rflags & (1 << 1UL));
846
847    uint64_t guest_cr0;
848    err += vmread(VMX_GUEST_CR0, &guest_cr0);
849    if (ia32e_guest || ((guest_rflags & CR0_PE) == 0)) {
850        assert((guest_rflags & RFLAGS_VM) == 0);
851    }
852
853    uint64_t intr_info;
854    err += vmread(VMX_EXIT_INTR_INFO, &intr_info);
855    bool intr_valid_bit = !!(intr_info & (1 << 31UL));
856    int intr_type = ((intr_info >> 8) & 0x3);
857    if (intr_valid_bit && intr_type == INTR_TYPE_EXT_INTR) {
858        assert(guest_rflags & RFLAGS_IF);
859    }
860    assert(err_is_ok(err));
861}
862
863void check_guest_state_area(void)
864{
865    uint64_t entry_controls, sec_ctls;
866    errval_t err = vmread(VMX_ENTRY_CONTROLS, &entry_controls);
867    err += vmread(VMX_EXEC_SEC_PROC, &sec_ctls);
868
869    ia32e_guest = !!(entry_controls & ENTRY_CLTS_IA32E_MODE);
870    unrestricted_guest = !!(sec_ctls & SP_CLTS_UNRSTD_GUEST);
871
872    check_guest_control_registers();
873
874    uint64_t sysenter_esp, sysenter_eip;
875    err += vmread(VMX_GUEST_SYSENTER_ESP, &sysenter_esp);
876    err += vmread(VMX_GUEST_SYSENTER_EIP, &sysenter_eip);
877    assert(err_is_ok(err));
878
879    bool sysenter_esp_canonical = is_canonical(sysenter_esp);
880    assert(sysenter_esp_canonical);
881    bool sysenter_eip_canonical = is_canonical(sysenter_eip);
882    assert(sysenter_eip_canonical);
883
884    check_guest_efer();
885    check_guest_seg_regs();
886    check_guest_desc_table_regs();
887
888    check_guest_rip();
889    check_guest_rflags();
890}
891
892static void check_host_cr0(void)
893{
894    uint64_t host_cr0;
895    errval_t err = vmread(VMX_HOST_CR0, &host_cr0);
896    assert(err_is_ok(err));
897
898    uint64_t cr0_fixed0 = ia32_vmx_cr0_fixed0_rd(NULL);
899    uint64_t cr0_fixed1 = ia32_vmx_cr0_fixed1_rd(NULL);
900    assert(((host_cr0 | cr0_fixed0) & cr0_fixed1) == host_cr0);
901}
902
903static void check_host_cr3(void)
904{
905    uint64_t host_cr3;
906    errval_t err = vmread(VMX_HOST_CR3, &host_cr3);
907    assert(err_is_ok(err));
908
909    bool within_pa_width = is_within_pa_width(host_cr3);
910    assert(within_pa_width);
911}
912
913static void check_host_cr4(void)
914{
915    uint64_t exit_controls;
916    errval_t err = vmread(VMX_EXIT_CONTROLS, &exit_controls);
917
918    uint64_t host_cr4;
919    err += vmread(VMX_HOST_CR4, &host_cr4);
920    assert(err_is_ok(err));
921
922    uint32_t cr4_fixed0 = ia32_vmx_cr4_fixed0_rd(NULL);
923    uint32_t cr4_fixed1 = ia32_vmx_cr4_fixed1_rd(NULL);
924    assert(((host_cr4 | cr4_fixed0) & cr4_fixed1) == host_cr4);
925
926    bool host_size_set = !!(exit_controls & EXIT_CLTS_HOST_SIZE);
927    if (host_size_set) {
928        assert(host_cr4 & CR4_PAE);
929    } else {
930        assert((host_cr4 & CR4_PCIDE) == 0);
931    }
932}
933
// Runs all host control-register checks (CR0, CR3, CR4).
static void check_host_control_registers(void)
{
    check_host_cr0();
    check_host_cr3();
    check_host_cr4();
}
940
941static void check_host_efer(void)
942{
943    uint64_t exit_controls;
944    errval_t err = vmread(VMX_EXIT_CONTROLS, &exit_controls);
945    if (exit_controls & EXIT_CLTS_LOAD_EFER) {
946        uint64_t host_efer;
947	err += vmread(VMX_HOST_EFER_F, &host_efer);
948	// Bits reserved must be set to 0:
949
950	// Bits 1:7
951	assert(((host_efer >> 1) & 0x7F) == 0);
952	// Bit 9
953	assert(((host_efer >> 9) & 0x1) == 0);
954	// Bits 12:63
955	assert((host_efer & ~0xFFF) == 0);
956
957	bool host_size_set = !!(exit_controls & EXIT_CLTS_HOST_SIZE);
958	bool lma_set = !!(host_efer & EFER_LMA);
959	assert(lma_set == host_size_set);
960
961	bool lme_set = !!(host_efer & EFER_LME);
962	assert(lme_set == host_size_set);
963    }
964    assert(err_is_ok(err));
965}
966
967static void check_host_seg_sel(void)
968{
969    // CS
970    uint64_t cs_sel;
971    errval_t err = vmread(VMX_HOST_CS_SEL, &cs_sel);
972    int cs_rpl = (cs_sel & SEL_RPL);
973    int cs_ti = (cs_sel & SEL_TI);
974    assert(cs_rpl == 0);
975    assert(cs_ti == 0);
976    assert(cs_sel != 0);
977
978    // SS
979    uint64_t ss_sel;
980    err += vmread(VMX_HOST_SS_SEL, &ss_sel);
981    int ss_rpl = (ss_sel & SEL_RPL);
982    int ss_ti = (ss_sel & SEL_TI);
983    assert(ss_rpl == 0);
984    assert(ss_ti == 0);
985
986    uint64_t exit_controls;
987    err += vmread(VMX_EXIT_CONTROLS, &exit_controls);
988    bool host_size_set = !!(exit_controls & EXIT_CLTS_HOST_SIZE);
989    if (!host_size_set) {
990        assert(ss_sel != 0);
991    }
992
993    // DS
994    uint64_t ds_sel;
995    err += vmread(VMX_HOST_DS_SEL, &ds_sel);
996    int ds_rpl = (ds_sel & SEL_RPL);
997    int ds_ti = (ds_sel & SEL_TI);
998    assert(ds_rpl == 0);
999    assert(ds_ti == 0);
1000
1001    // ES
1002    uint64_t es_sel;
1003    err += vmread(VMX_HOST_ES_SEL, &es_sel);
1004    int es_rpl = (es_sel & SEL_RPL);
1005    int es_ti = (es_sel & SEL_TI);
1006    assert(es_rpl == 0);
1007    assert(es_ti == 0);
1008
1009    // FS
1010    uint64_t fs_sel;
1011    err += vmread(VMX_HOST_FS_SEL, &fs_sel);
1012    int fs_rpl = (fs_sel & SEL_RPL);
1013    int fs_ti = (fs_sel & SEL_TI);
1014    assert(fs_rpl == 0);
1015    assert(fs_ti == 0);
1016
1017    // GS
1018    uint64_t gs_sel;
1019    err += vmread(VMX_HOST_GS_SEL, &gs_sel);
1020    int gs_rpl = (gs_sel & SEL_RPL);
1021    int gs_ti = (gs_sel & SEL_TI);
1022    assert(gs_rpl == 0);
1023    assert(gs_ti == 0);
1024
1025    // TR
1026    uint64_t tr_sel;
1027    err += vmread(VMX_HOST_TR_SEL, &tr_sel);
1028    assert(err_is_ok(err));
1029
1030    int tr_rpl = (tr_sel & SEL_RPL);
1031    int tr_ti = (tr_sel & SEL_TI);
1032    assert(tr_rpl == 0);
1033    assert(tr_ti == 0);
1034    assert(tr_sel != 0);
1035}
1036
1037static void check_host_seg_base(void)
1038{
1039    uint64_t fs_base;
1040    errval_t err = vmread(VMX_HOST_FS_BASE, &fs_base);
1041    bool fs_base_canonical = is_canonical(fs_base);
1042    assert(fs_base_canonical);
1043
1044    uint64_t gs_base;
1045    err += vmread(VMX_GUEST_GS_BASE, &gs_base);
1046    bool gs_base_canonical = is_canonical(gs_base);
1047    assert(gs_base_canonical);
1048
1049    uint64_t tr_base;
1050    err += vmread(VMX_GUEST_TR_BASE, &tr_base);
1051    assert(err_is_ok(err));
1052
1053    bool tr_base_canonical = is_canonical(tr_base);
1054    assert(tr_base_canonical);
1055}
1056
// Runs the checks on the host segment registers: first the selector
// fields (RPL/TI/nonzero constraints), then the FS/GS/TR base-address
// canonicality checks.
static void check_host_seg_regs(void)
{
    check_host_seg_sel();
    check_host_seg_base();
}
1062
1063static void check_host_desc_table_regs(void)
1064{
1065    uint64_t idtr_base;
1066    errval_t err = vmread(VMX_GUEST_IDTR_BASE, &idtr_base);
1067    bool idtr_base_canonical = is_canonical(idtr_base);
1068    assert(idtr_base_canonical);
1069
1070    uint64_t gdtr_base;
1071    err += vmread(VMX_GUEST_GDTR_BASE, &gdtr_base);
1072    assert(err_is_ok(err));
1073
1074    bool gdtr_base_canonical = is_canonical(gdtr_base);
1075    assert(gdtr_base_canonical);
1076}
1077
1078static void check_host_rip(void)
1079{
1080    uint64_t host_rip;
1081    errval_t err = vmread(VMX_HOST_RIP, &host_rip);
1082
1083    uint64_t exit_controls;
1084    err += vmread(VMX_EXIT_CONTROLS, &exit_controls);
1085    assert(err_is_ok(err));
1086
1087    bool host_size_set = !!(exit_controls & EXIT_CLTS_HOST_SIZE);
1088    if (host_size_set) {
1089        bool rip_canonical = is_canonical(host_rip);
1090	assert(rip_canonical);
1091    } else {
1092        assert((host_rip & ~0xFFFFFFFF) == 0);
1093    }
1094}
1095
1096void check_host_state_area(void)
1097{
1098    check_host_control_registers();
1099
1100    uint64_t sysenter_esp, sysenter_eip;
1101    errval_t err = vmread(VMX_HOST_SYSENTER_ESP, &sysenter_esp);
1102    err += vmread(VMX_HOST_SYSENTER_EIP, &sysenter_eip);
1103    assert(err_is_ok(err));
1104
1105    bool sysenter_esp_canonical = is_canonical(sysenter_esp);
1106    assert(sysenter_esp_canonical);
1107    bool sysenter_eip_canonical = is_canonical(sysenter_eip);
1108    assert(sysenter_eip_canonical);
1109
1110    check_host_efer();
1111    check_host_seg_regs();
1112    check_host_desc_table_regs();
1113
1114    check_host_rip();
1115}
1116
1117// Checks pertaining to the "Enable EPT" and "EPT-violation #VE" controls
1118static void check_vmx_controls_ept(void)
1119{
1120    bool within_pa_width;
1121    uint64_t pp_controls, sp_controls;
1122    errval_t err = vmread(VMX_EXEC_PRIM_PROC, &pp_controls);
1123    err += vmread(VMX_EXEC_SEC_PROC, &sp_controls);
1124
1125    if (unrestricted_guest) {
1126        assert(sp_controls & SP_CLTS_ENABLE_EPT);
1127    }
1128
1129    if (sp_controls & SP_CLTS_ENABLE_EPT) {
1130        assert(pp_controls & PP_CLTS_SEC_CTLS);
1131
1132	uint64_t eptp;
1133	err += vmread(VMX_EPTP_F, &eptp);
1134
1135	ia32_vmx_ept_vpid_t ept_vpid_msr = ia32_vmx_ept_vpid_rd(NULL);
1136	int set_ept_type = ept_type(eptp);
1137
1138	switch(set_ept_type) {
1139	case 0x0: // Uncacheable
1140	    assert(ia32_vmx_ept_vpid_ucmt_extract(ept_vpid_msr));
1141	    break;
1142	case 0x4: // Execute-only
1143	    assert(ia32_vmx_ept_vpid_eot_extract(ept_vpid_msr));
1144	    break;
1145	case 0x6: // Write-back
1146	    assert(ia32_vmx_ept_vpid_wbmt_extract(ept_vpid_msr));
1147	    break;
1148	default:
1149	    assert(!"EPT type value is reserved");
1150	}
1151
1152	assert(ia32_vmx_ept_vpid_pwl4_extract(ept_vpid_msr));
1153	assert(ept_page_walk_length(eptp) == 3);
1154
1155	assert(((eptp >> 7) & 0x1F) == 0);
1156        within_pa_width = is_within_pa_width(eptp);
1157	assert(within_pa_width);
1158
1159	bool accessed_dirty = ia32_vmx_ept_vpid_ept_adf_extract(ept_vpid_msr);
1160	if (!accessed_dirty) {
1161	    assert(ept_accessed_dirty_enable(eptp) == 0);
1162	}
1163    }
1164
1165    if (sp_controls & SP_CLTS_EPT_VIOL) {
1166        assert(pp_controls & PP_CLTS_SEC_CTLS);
1167	uint64_t vexcp_info_addr;
1168	err += vmread(VMX_VEXCP_INFO_F, &vexcp_info_addr);
1169	assert((vexcp_info_addr & BASE_PAGE_MASK) == 0);
1170	within_pa_width = is_within_pa_width(vexcp_info_addr);
1171	assert(within_pa_width);
1172    }
1173    assert(err_is_ok(err));
1174}
1175
1176// Checks related to address-space size
1177static void check_vmx_controls_addr_space(void)
1178{
1179    uint64_t exit_controls;
1180    errval_t err = vmread(VMX_EXIT_CONTROLS, &exit_controls);
1181
1182    bool host_size_set = !!(exit_controls & EXIT_CLTS_HOST_SIZE);
1183
1184    uint64_t host_efer;
1185    err += vmread(VMX_HOST_EFER_F, &host_efer);
1186    assert(err_is_ok(err));
1187
1188    if (host_efer & EFER_LMA) {
1189        assert(host_size_set);
1190    } else {
1191        assert(!ia32e_guest);
1192	assert(!host_size_set);
1193    }
1194
1195    if (!host_size_set) {
1196        assert(!ia32e_guest);
1197    }
1198}
1199
// Checks on the VMX control fields of the VMCS: bitmap addresses, the
// TPR-shadow / virtual-APIC settings, NMI controls, posted interrupts,
// VPID and VMCS shadowing, followed by the EPT and address-space-size
// checks. Any violated condition trips an assert.
void check_vmx_controls(void)
{
    // The MSR-bitmap address must be page-aligned and within the
    // processor's physical-address width.
    uint64_t msr_bitmap_addr;
    errval_t err = vmread(VMX_MSRBMP_F, &msr_bitmap_addr);
    assert((msr_bitmap_addr & BASE_PAGE_MASK) == 0);
    bool within_pa_width = is_within_pa_width(msr_bitmap_addr);
    assert(within_pa_width);

    // Both I/O-bitmap addresses are subject to the same alignment and
    // width constraints.
    uint64_t io_bitmap_a_addr, io_bitmap_b_addr;
    err += vmread(VMX_IOBMP_A_F, &io_bitmap_a_addr);
    assert((io_bitmap_a_addr & BASE_PAGE_MASK) == 0);
    within_pa_width = is_within_pa_width(io_bitmap_a_addr);
    assert(within_pa_width);

    err += vmread(VMX_IOBMP_B_F, &io_bitmap_b_addr);
    assert((io_bitmap_b_addr & BASE_PAGE_MASK) == 0);
    within_pa_width = is_within_pa_width(io_bitmap_b_addr);
    assert(within_pa_width);

    uint64_t pin_controls, pp_controls, sp_controls;
    err += vmread(VMX_EXEC_PIN_BASED, &pin_controls);
    err += vmread(VMX_EXEC_PRIM_PROC, &pp_controls);
    err += vmread(VMX_EXEC_SEC_PROC, &sp_controls);

    uint64_t exit_controls;
    err += vmread(VMX_EXIT_CONTROLS, &exit_controls);

    if (pp_controls & PP_CLTS_TPR_SHADOW) {
        // With the TPR shadow enabled, the virtual-APIC page address
        // must be page-aligned and within the physical-address width.
        uint64_t vapic_addr;
	err += vmread(VMX_VAPIC_F, &vapic_addr);
	assert((vapic_addr & BASE_PAGE_MASK) == 0);
	within_pa_width = is_within_pa_width(vapic_addr);
	assert(within_pa_width);

	uint64_t tpr_threshold;
	err += vmread(VMX_TPR_THRESHOLD, &tpr_threshold);
        if((sp_controls & SP_CLTS_VIRQ_DEL) == 0) {
	    // Bits 4:31 must be 0
	    assert((tpr_threshold & 0xFFFFFFF0) == 0);
	}

	if ((sp_controls & SP_CLTS_VIRQ_DEL) == 0 &&
	    (sp_controls & SP_CLTS_VIRT_APIC) == 0) {
	    // The TPR threshold (bits 3:0) must not exceed the TPR
	    // class read from bits 7:4 of the VTPR on the virtual-APIC
	    // page; the page is mapped here to read it.
	    lvaddr_t vapic_page = local_phys_to_mem((lpaddr_t)vapic_addr);
	    assert(vapic_page != 0);
	    // virtual task-priority register is at offset 0x80
	    lvaddr_t vtpr_addr = vapic_page + 0x80;
	    uint32_t vtpr = *((uint32_t *)vtpr_addr);
	    assert((tpr_threshold & 0xF) <= ((vtpr >> 4) & 0xF));
	}
    } else {
        // Without the TPR shadow, the x2APIC-mode, APIC-register
        // virtualization and virtual-interrupt delivery controls must
        // all be clear.
        assert((sp_controls & SP_CLTS_VIRT_X2APIC) == 0);
	assert((sp_controls & SP_CLTS_VIRT_APIC_REG) == 0);
	assert((sp_controls & SP_CLTS_VIRQ_DEL) == 0);
    }

    // Virtual NMIs require NMI exiting, and NMI-window exiting in turn
    // requires virtual NMIs.
    if ((pin_controls & PIN_CTLS_NMI) == 0) {
        assert((pin_controls & PIN_CTLS_VIRT_NMI) == 0);
    }

    if ((pin_controls & PIN_CTLS_VIRT_NMI) == 0) {
        assert((pp_controls & PP_CLTS_NMI_WINDOW) == 0);
    }

    if (sp_controls & SP_CLTS_VIRT_APIC) {
        // APIC-access virtualization requires the secondary controls;
        // the APIC-access page must be page-aligned and within the
        // physical-address width.
        assert(pp_controls & PP_CLTS_SEC_CTLS);
	uint64_t apic_access_addr;
	err += vmread(VMX_APIC_ACC_F, &apic_access_addr);
	assert((apic_access_addr & BASE_PAGE_MASK) == 0);
	within_pa_width = is_within_pa_width(apic_access_addr);
	assert(within_pa_width);
    }

    if (sp_controls & SP_CLTS_VIRT_X2APIC) {
        // x2APIC-mode virtualization excludes APIC-access virtualization.
        assert(pp_controls & PP_CLTS_SEC_CTLS);
	assert((sp_controls & SP_CLTS_VIRT_APIC) == 0);
    }

    if (sp_controls & SP_CLTS_VIRQ_DEL) {
        // Virtual-interrupt delivery requires external-interrupt exiting.
        assert(pp_controls & PP_CLTS_SEC_CTLS);
        assert(pin_controls & PIN_CTLS_EXT_INTR);
    }

    if (pin_controls & PIN_CTLS_POSTED_INTR) {
        // Posted interrupts require virtual-interrupt delivery and the
        // acknowledge-interrupt-on-exit control.
        assert(sp_controls & SP_CLTS_VIRQ_DEL);
	assert(exit_controls & EXIT_CLTS_ACK_INTR);

	uint64_t pinv;
	err += vmread(VMX_PINV, &pinv);
	// The posted-interrupt notification vector must be in the
	// range 0-255
	assert((pinv & 0xFF00) == 0);

	// The posted-interrupt descriptor address must be 64-byte
	// aligned and within the physical-address width.
	uint64_t pid_addr;
	err += vmread(VMX_PID_F, &pid_addr);
	assert((pid_addr & 0x3F) == 0);
	within_pa_width = is_within_pa_width(pid_addr);
	assert(within_pa_width);
    }

    if (sp_controls & SP_CLTS_ENABLE_VPID) {
        // With VPID enabled, the VPID field must be nonzero.
        assert(pp_controls & PP_CLTS_SEC_CTLS);

	uint64_t vpid;
	err += vmread(VMX_VPID, &vpid);
	assert(vpid != 0);
    }

    if (sp_controls & SP_CLTS_VMCS_SHADOW) {
        // VMCS shadowing: the VMREAD and VMWRITE bitmaps must each be
        // page-aligned and within the physical-address width.
        assert(pp_controls & PP_CLTS_SEC_CTLS);
	uint64_t vmread_bitmap_addr, vmwrite_bitmap_addr;
	err += vmread(VMX_VMREAD_BMP_F, &vmread_bitmap_addr);
	err += vmread(VMX_VMWRITE_BMP_F, &vmwrite_bitmap_addr);

	assert((vmread_bitmap_addr & BASE_PAGE_MASK) == 0);
	within_pa_width = is_within_pa_width(vmread_bitmap_addr);
	assert(within_pa_width);

	assert((vmwrite_bitmap_addr & BASE_PAGE_MASK) == 0);
	within_pa_width = is_within_pa_width(vmwrite_bitmap_addr);
	assert(within_pa_width);
    }
    assert(err_is_ok(err));

    check_vmx_controls_ept();
    check_vmx_controls_addr_space();
}
1327