/*
 * Copyright 2020, Data61, CSIRO (ABN 41 687 119 230)
 *
 * SPDX-License-Identifier: GPL-2.0-only
 */

#pragma once

#include <config.h>

#ifdef CONFIG_ARM_HYPERVISOR_SUPPORT

#include <arch/object/vcpu.h>
#include <drivers/timer/arm_generic.h>

#ifdef CONFIG_DISABLE_WFI_WFE_TRAPS
/* Trap SMC and override CPSR.AIF */
#define HCR_COMMON ( HCR_TSC | HCR_AMO | HCR_IMO \
                   | HCR_FMO | HCR_DC  | HCR_VM)
#else
/* Trap WFI/WFE/SMC and override CPSR.AIF */
#define HCR_COMMON ( HCR_TSC | HCR_TWE | HCR_TWI | HCR_AMO | HCR_IMO \
                   | HCR_FMO | HCR_DC  | HCR_VM)
#endif
/* Allow native tasks to run at PL1, but restrict access */
#define HCR_NATIVE ( HCR_COMMON | HCR_TGE | HCR_TVM | HCR_TTLB | HCR_TCACHE \
                   | HCR_TAC | HCR_SWIO)
#define HCR_VCPU   (HCR_COMMON)

/* Amongst other things we set the caches to enabled by default. This
 * may cause problems when booting guests that expect caches to be
 * disabled */
#define SCTLR_DEFAULT 0xc5187c
#define ACTLR_DEFAULT 0x40
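
/* Illustrative only (not used by the kernel): the ARMv7-A SCTLR bits most
 * relevant to the note above. A VMM that wants its guest to start with the
 * MMU and caches off could, for example, clear these bits in the SCTLR value
 * it writes with seL4_ARM_VCPU_WriteRegs before starting the guest. */
#define SCTLR_EXAMPLE_M (1u << 0)  /* stage 1 MMU enable */
#define SCTLR_EXAMPLE_C (1u << 2)  /* data/unified cache enable */
#define SCTLR_EXAMPLE_I (1u << 12) /* instruction cache enable */

/* The banked-register MRS/MSR forms used by the accessors below (e.g.
 * "mrs rX, lr_svc") are provided by the ARMv7 Virtualization Extensions and
 * are only usable from a sufficiently privileged mode such as HYP, which is
 * where the kernel runs when hypervisor support is enabled. */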
static inline word_t get_lr_svc(void)
{
    word_t ret;
    asm("mrs %[ret], lr_svc" : [ret]"=r"(ret));
    return ret;
}

static inline void set_lr_svc(word_t val)
{
    asm("msr lr_svc, %[val]" :: [val]"r"(val));
}

static inline word_t get_sp_svc(void)
{
    word_t ret;
    asm("mrs %[ret], sp_svc" : [ret]"=r"(ret));
    return ret;
}

static inline void set_sp_svc(word_t val)
{
    asm("msr sp_svc, %[val]" :: [val]"r"(val));
}

static inline word_t get_spsr_svc(void)
{
    word_t ret;
    asm("mrs %[ret], spsr_svc" : [ret]"=r"(ret));
    return ret;
}

static inline void set_spsr_svc(word_t val)
{
    asm("msr spsr_svc, %[val]" :: [val]"r"(val));
}

static inline word_t get_lr_abt(void)
{
    word_t ret;
    asm("mrs %[ret], lr_abt" : [ret]"=r"(ret));
    return ret;
}

static inline void set_lr_abt(word_t val)
{
    asm("msr lr_abt, %[val]" :: [val]"r"(val));
}

static inline word_t get_sp_abt(void)
{
    word_t ret;
    asm("mrs %[ret], sp_abt" : [ret]"=r"(ret));
    return ret;
}

static inline void set_sp_abt(word_t val)
{
    asm("msr sp_abt, %[val]" :: [val]"r"(val));
}

static inline word_t get_spsr_abt(void)
{
    word_t ret;
    asm("mrs %[ret], spsr_abt" : [ret]"=r"(ret));
    return ret;
}

static inline void set_spsr_abt(word_t val)
{
    asm("msr spsr_abt, %[val]" :: [val]"r"(val));
}

static inline word_t get_lr_und(void)
{
    word_t ret;
    asm("mrs %[ret], lr_und" : [ret]"=r"(ret));
    return ret;
}

static inline void set_lr_und(word_t val)
{
    asm("msr lr_und, %[val]" :: [val]"r"(val));
}

static inline word_t get_sp_und(void)
{
    word_t ret;
    asm("mrs %[ret], sp_und" : [ret]"=r"(ret));
    return ret;
}

static inline void set_sp_und(word_t val)
{
    asm("msr sp_und, %[val]" :: [val]"r"(val));
}

static inline word_t get_spsr_und(void)
{
    word_t ret;
    asm("mrs %[ret], spsr_und" : [ret]"=r"(ret));
    return ret;
}

static inline void set_spsr_und(word_t val)
{
    asm("msr spsr_und, %[val]" :: [val]"r"(val));
}

static inline word_t get_lr_irq(void)
{
    word_t ret;
    asm("mrs %[ret], lr_irq" : [ret]"=r"(ret));
    return ret;
}

static inline void set_lr_irq(word_t val)
{
    asm("msr lr_irq, %[val]" :: [val]"r"(val));
}

static inline word_t get_sp_irq(void)
{
    word_t ret;
    asm("mrs %[ret], sp_irq" : [ret]"=r"(ret));
    return ret;
}

static inline void set_sp_irq(word_t val)
{
    asm("msr sp_irq, %[val]" :: [val]"r"(val));
}

static inline word_t get_spsr_irq(void)
{
    word_t ret;
    asm("mrs %[ret], spsr_irq" : [ret]"=r"(ret));
    return ret;
}

static inline void set_spsr_irq(word_t val)
{
    asm("msr spsr_irq, %[val]" :: [val]"r"(val));
}

static inline word_t get_lr_fiq(void)
{
    word_t ret;
    asm("mrs %[ret], lr_fiq" : [ret]"=r"(ret));
    return ret;
}

static inline void set_lr_fiq(word_t val)
{
    asm("msr lr_fiq, %[val]" :: [val]"r"(val));
}

static inline word_t get_sp_fiq(void)
{
    word_t ret;
    asm("mrs %[ret], sp_fiq" : [ret]"=r"(ret));
    return ret;
}

static inline void set_sp_fiq(word_t val)
{
    asm("msr sp_fiq, %[val]" :: [val]"r"(val));
}

static inline word_t get_spsr_fiq(void)
{
    word_t ret;
    asm("mrs %[ret], spsr_fiq" : [ret]"=r"(ret));
    return ret;
}

static inline void set_spsr_fiq(word_t val)
{
    asm("msr spsr_fiq, %[val]" :: [val]"r"(val));
}

static inline word_t get_r8_fiq(void)
{
    word_t ret;
    asm("mrs %[ret], r8_fiq" : [ret]"=r"(ret));
    return ret;
}

static inline void set_r8_fiq(word_t val)
{
    asm("msr r8_fiq, %[val]" :: [val]"r"(val));
}

static inline word_t get_r9_fiq(void)
{
    word_t ret;
    asm("mrs %[ret], r9_fiq" : [ret]"=r"(ret));
    return ret;
}

static inline void set_r9_fiq(word_t val)
{
    asm("msr r9_fiq, %[val]" :: [val]"r"(val));
}

static inline word_t get_r10_fiq(void)
{
    word_t ret;
    asm("mrs %[ret], r10_fiq" : [ret]"=r"(ret));
    return ret;
}

static inline void set_r10_fiq(word_t val)
{
    asm("msr r10_fiq, %[val]" :: [val]"r"(val));
}

static inline word_t get_r11_fiq(void)
{
    word_t ret;
    asm("mrs %[ret], r11_fiq" : [ret]"=r"(ret));
    return ret;
}

static inline void set_r11_fiq(word_t val)
{
    asm("msr r11_fiq, %[val]" :: [val]"r"(val));
}

static inline word_t get_r12_fiq(void)
{
    word_t ret;
    asm("mrs %[ret], r12_fiq" : [ret]"=r"(ret));
    return ret;
}

static inline void set_r12_fiq(word_t val)
{
    asm("msr r12_fiq, %[val]" :: [val]"r"(val));
}

static inline word_t get_cntv_tval(void)
{
    word_t ret = 0;
    MRC(CNTV_TVAL, ret);
    return ret;
}

static inline void set_cntv_tval(word_t val)
{
    MCR(CNTV_TVAL, val);
}

static inline word_t get_cntv_ctl(void)
{
    word_t ret = 0;
    MRC(CNTV_CTL, ret);
    return ret;
}

static inline void set_cntv_ctl(word_t val)
{
    MCR(CNTV_CTL, val);
}

#ifdef ENABLE_SMP_SUPPORT
static inline word_t get_vmpidr(void)
{
    word_t ret = 0;
    MRC(VMPIDR, ret);
    return ret;
}

static inline void set_vmpidr(word_t val)
{
    MCR(VMPIDR, val);
}
#endif

/** MODIFIES: phantom_machine_state */
/** DONT_TRANSLATE */
static inline void set_cntv_cval_64(uint64_t val)
{
    MCRR(CNTV_CVAL, val);
}

/** MODIFIES: */
/** DONT_TRANSLATE */
static inline uint64_t get_cntv_cval_64(void)
{
    uint64_t ret = 0;
    MRRC(CNTV_CVAL, ret);
    return ret;
}

static inline void set_cntv_cval_high(word_t val)
{
    uint64_t ret = get_cntv_cval_64();
    uint64_t cval_high = (uint64_t) val << 32;
    uint64_t cval_low = (ret << 32) >> 32;
    set_cntv_cval_64(cval_high | cval_low);
}

static inline word_t get_cntv_cval_high(void)
{
    uint64_t ret = get_cntv_cval_64();
    return (word_t)(ret >> 32);
}

static inline void set_cntv_cval_low(word_t val)
{
    uint64_t ret = get_cntv_cval_64();
    uint64_t cval_high = (ret >> 32) << 32;
    uint64_t cval_low = (uint64_t) val;
    set_cntv_cval_64(cval_high | cval_low);
}

static inline word_t get_cntv_cval_low(void)
{
    uint64_t ret = get_cntv_cval_64();
    return (word_t) ret;
}

/** MODIFIES: phantom_machine_state */
/** DONT_TRANSLATE */
static inline void set_cntv_off_64(uint64_t val)
{
    MCRR(CNTVOFF, val);
}

/** MODIFIES: */
/** DONT_TRANSLATE */
static inline uint64_t get_cntv_off_64(void)
{
    uint64_t ret = 0;
    MRRC(CNTVOFF, ret);
    return ret;
}

static inline void set_cntv_off_high(word_t val)
{
    uint64_t ret = get_cntv_off_64();
    uint64_t cv_off_high = (uint64_t) val << 32;
    uint64_t cv_off_low = (ret << 32) >> 32;
    set_cntv_off_64(cv_off_high | cv_off_low);
}

static inline word_t get_cntv_off_high(void)
{
    uint64_t ret = get_cntv_off_64();
    return (word_t)(ret >> 32);
}

static inline void set_cntv_off_low(word_t val)
{
    uint64_t ret = get_cntv_off_64();
    uint64_t cv_off_high = (ret >> 32) << 32;
    uint64_t cv_off_low = (uint64_t) val;
    set_cntv_off_64(cv_off_high | cv_off_low);
}

static inline word_t get_cntv_off_low(void)
{
    uint64_t ret = get_cntv_off_64();
    return (word_t) ret;
}
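
/* For reference: the architecture defines the guest-visible virtual counter as
 * CNTVCT = CNTPCT - CNTVOFF, so saving and restoring CNTVOFF (together with
 * CNTV_CVAL and CNTV_CTL above) is what preserves a guest's virtual timer view
 * across VCPU switches. */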

static word_t vcpu_hw_read_reg(word_t reg_index)
{
    word_t reg = 0;
    switch (reg_index) {
    case seL4_VCPUReg_SCTLR:
        return getSCTLR();
    case seL4_VCPUReg_ACTLR:
        return getACTLR();
    case seL4_VCPUReg_TTBCR:
        return readTTBCR();
    case seL4_VCPUReg_TTBR0:
        return readTTBR0();
    case seL4_VCPUReg_TTBR1:
        return readTTBR1();
    case seL4_VCPUReg_DACR:
        return readDACR();
    case seL4_VCPUReg_DFSR:
        return getDFSR();
    case seL4_VCPUReg_IFSR:
        return getIFSR();
    case seL4_VCPUReg_ADFSR:
        return getADFSR();
    case seL4_VCPUReg_AIFSR:
        return getAIFSR();
    case seL4_VCPUReg_DFAR:
        return getDFAR();
    case seL4_VCPUReg_IFAR:
        return getIFAR();
    case seL4_VCPUReg_PRRR:
        return getPRRR();
    case seL4_VCPUReg_NMRR:
        return getNMRR();
    case seL4_VCPUReg_CIDR:
        return getCIDR();
    case seL4_VCPUReg_TPIDRPRW:
        return readTPIDRPRW();
    case seL4_VCPUReg_FPEXC:
        return reg;
    case seL4_VCPUReg_LRsvc:
        return get_lr_svc();
    case seL4_VCPUReg_SPsvc:
        return get_sp_svc();
    case seL4_VCPUReg_LRabt:
        return get_lr_abt();
    case seL4_VCPUReg_SPabt:
        return get_sp_abt();
    case seL4_VCPUReg_LRund:
        return get_lr_und();
    case seL4_VCPUReg_SPund:
        return get_sp_und();
    case seL4_VCPUReg_LRirq:
        return get_lr_irq();
    case seL4_VCPUReg_SPirq:
        return get_sp_irq();
    case seL4_VCPUReg_LRfiq:
        return get_lr_fiq();
    case seL4_VCPUReg_SPfiq:
        return get_sp_fiq();
    case seL4_VCPUReg_R8fiq:
        return get_r8_fiq();
    case seL4_VCPUReg_R9fiq:
        return get_r9_fiq();
    case seL4_VCPUReg_R10fiq:
        return get_r10_fiq();
    case seL4_VCPUReg_R11fiq:
        return get_r11_fiq();
    case seL4_VCPUReg_R12fiq:
        return get_r12_fiq();
    case seL4_VCPUReg_SPSRsvc:
        return get_spsr_svc();
    case seL4_VCPUReg_SPSRabt:
        return get_spsr_abt();
    case seL4_VCPUReg_SPSRund:
        return get_spsr_und();
    case seL4_VCPUReg_SPSRirq:
        return get_spsr_irq();
    case seL4_VCPUReg_SPSRfiq:
        return get_spsr_fiq();
    case seL4_VCPUReg_CNTV_CTL:
        return get_cntv_ctl();
    case seL4_VCPUReg_CNTV_CVALhigh:
        return get_cntv_cval_high();
    case seL4_VCPUReg_CNTV_CVALlow:
        return get_cntv_cval_low();
    case seL4_VCPUReg_CNTVOFFhigh:
        return get_cntv_off_high();
    case seL4_VCPUReg_CNTVOFFlow:
        return get_cntv_off_low();
#ifdef ENABLE_SMP_SUPPORT
    case seL4_VCPUReg_VMPIDR:
        return get_vmpidr();
#endif /* ENABLE_SMP_SUPPORT */
    default:
        fail("ARM/HYP: Invalid register index");
    }
}

static void vcpu_hw_write_reg(word_t reg_index, word_t reg)
{
    switch (reg_index) {
    case seL4_VCPUReg_SCTLR:
        setSCTLR(reg);
        break;
    case seL4_VCPUReg_ACTLR:
        setACTLR(reg);
        break;
    case seL4_VCPUReg_TTBCR:
        writeTTBCR(reg);
        break;
    case seL4_VCPUReg_TTBR0:
        writeTTBR0(reg);
        break;
    case seL4_VCPUReg_TTBR1:
        writeTTBR1(reg);
        break;
    case seL4_VCPUReg_DACR:
        writeDACR(reg);
        break;
    case seL4_VCPUReg_DFSR:
        setDFSR(reg);
        break;
    case seL4_VCPUReg_IFSR:
        setIFSR(reg);
        break;
    case seL4_VCPUReg_ADFSR:
        setADFSR(reg);
        break;
    case seL4_VCPUReg_AIFSR:
        setAIFSR(reg);
        break;
    case seL4_VCPUReg_DFAR:
        setDFAR(reg);
        break;
    case seL4_VCPUReg_IFAR:
        setIFAR(reg);
        break;
    case seL4_VCPUReg_PRRR:
        setPRRR(reg);
        break;
    case seL4_VCPUReg_NMRR:
        setNMRR(reg);
        break;
    case seL4_VCPUReg_CIDR:
        setCIDR(reg);
        break;
    case seL4_VCPUReg_TPIDRPRW:
        writeTPIDRPRW(reg);
        break;
    case seL4_VCPUReg_FPEXC:
        break;
    case seL4_VCPUReg_LRsvc:
        set_lr_svc(reg);
        break;
    case seL4_VCPUReg_SPsvc:
        set_sp_svc(reg);
        break;
    case seL4_VCPUReg_LRabt:
        set_lr_abt(reg);
        break;
    case seL4_VCPUReg_SPabt:
        set_sp_abt(reg);
        break;
    case seL4_VCPUReg_LRund:
        set_lr_und(reg);
        break;
    case seL4_VCPUReg_SPund:
        set_sp_und(reg);
        break;
    case seL4_VCPUReg_LRirq:
        set_lr_irq(reg);
        break;
    case seL4_VCPUReg_SPirq:
        set_sp_irq(reg);
        break;
    case seL4_VCPUReg_LRfiq:
        set_lr_fiq(reg);
        break;
    case seL4_VCPUReg_SPfiq:
        set_sp_fiq(reg);
        break;
    case seL4_VCPUReg_R8fiq:
        set_r8_fiq(reg);
        break;
    case seL4_VCPUReg_R9fiq:
        set_r9_fiq(reg);
        break;
    case seL4_VCPUReg_R10fiq:
        set_r10_fiq(reg);
        break;
    case seL4_VCPUReg_R11fiq:
        set_r11_fiq(reg);
        break;
    case seL4_VCPUReg_R12fiq:
        set_r12_fiq(reg);
        break;
    case seL4_VCPUReg_SPSRsvc:
        set_spsr_svc(reg);
        break;
    case seL4_VCPUReg_SPSRabt:
        set_spsr_abt(reg);
        break;
    case seL4_VCPUReg_SPSRund:
        set_spsr_und(reg);
        break;
    case seL4_VCPUReg_SPSRirq:
        set_spsr_irq(reg);
        break;
    case seL4_VCPUReg_SPSRfiq:
        set_spsr_fiq(reg);
        break;
    case seL4_VCPUReg_CNTV_CTL:
        set_cntv_ctl(reg);
        break;
    case seL4_VCPUReg_CNTV_CVALhigh:
        set_cntv_cval_high(reg);
        break;
    case seL4_VCPUReg_CNTV_CVALlow:
        set_cntv_cval_low(reg);
        break;
    case seL4_VCPUReg_CNTVOFFhigh:
        set_cntv_off_high(reg);
        break;
    case seL4_VCPUReg_CNTVOFFlow:
        set_cntv_off_low(reg);
        break;
#ifdef ENABLE_SMP_SUPPORT
    case seL4_VCPUReg_VMPIDR:
        set_vmpidr(reg);
        break;
#endif /* ENABLE_SMP_SUPPORT */
    default:
        fail("ARM/HYP: Invalid register index");
    }
}

#ifdef CONFIG_HAVE_FPU
static inline void access_fpexc(vcpu_t *vcpu, bool_t write)
{
    /* Save a copy of the current FPU trap status, since enableFpuInstInHyp
     * modifies armHSFPUEnabled.
     */
    bool_t flag = ARCH_NODE_STATE(armHSFPUEnabled);
    if (!flag) {
        enableFpuInstInHyp();
    }
    if (write) {
        VMSR(FPEXC, vcpu_read_reg(vcpu, seL4_VCPUReg_FPEXC));
    } else {
        word_t fpexc;
        VMRS(FPEXC, fpexc);
        vcpu_write_reg(vcpu, seL4_VCPUReg_FPEXC, fpexc);
    }
    /* restore the status */
    if (!flag) {
        trapFpuInstToHyp();
    }
}
#endif

static inline void armv_vcpu_boot_init(void)
{
#if defined(ARM_HYP_TRAP_CP14_IN_VCPU_THREADS) || defined(ARM_HYP_TRAP_CP14_IN_NATIVE_USER_THREADS)
    /* On the verified build, we have implemented a workaround that ensures
     * that we don't need to save and restore the debug coprocessor's state
     * (and therefore don't have to expose the CP14 registers to verification).
     *
     * The workaround is simple: we trap and intercept all Guest VM accesses
     * to the debug coprocessor, and deliver them as VMFault messages to the
     * VM Monitor. The VM Monitor can then choose either to kill the Guest VM,
     * or to silently step over the Guest VM's accesses to the debug
     * coprocessor, thereby closing the communication channel between Guest
     * VMs (the debug coprocessor acts as a communication channel unless we
     * save/restore its state between VM switches).
     *
     * In effect, this workaround delegates responsibility for closing that
     * communication channel from the kernel to the VM Monitor.
     */
    initHDCR();
#endif
}
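
/* Illustrative only, and not part of the kernel: a minimal sketch of how a VM
 * Monitor might "silently step over" one of the trapped debug-coprocessor
 * accesses described above, assuming the standard (non-MCS) libsel4 fault
 * handling API, an ARM-state guest, hypothetical fault_ep/guest_tcb caps,
 * and that skipping (rather than emulating) the access is acceptable:
 *
 *     seL4_Word badge;
 *     seL4_MessageInfo_t info = seL4_Recv(fault_ep, &badge);
 *     if (seL4_MessageInfo_get_label(info) == seL4_Fault_VMFault) {
 *         seL4_UserContext regs;
 *         seL4_TCB_ReadRegisters(guest_tcb, false, 0,
 *                                sizeof(regs) / sizeof(seL4_Word), &regs);
 *         regs.pc += 4;   // skip the faulting 32-bit ARM instruction
 *         seL4_TCB_WriteRegisters(guest_tcb, false, 0,
 *                                 sizeof(regs) / sizeof(seL4_Word), &regs);
 *         seL4_Reply(seL4_MessageInfo_new(0, 0, 0, 0));   // resume the guest
 *     }
 *
 * A real monitor would typically also decode the access and emulate the
 * destination register for reads rather than skipping them outright.
 */
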
static inline void armv_vcpu_save(vcpu_t *vcpu, bool_t active)
{
    /* save registers */
    vcpu_save_reg_range(vcpu, seL4_VCPUReg_ACTLR, seL4_VCPUReg_SPSRfiq);

#ifdef ARM_HYP_CP14_SAVE_AND_RESTORE_VCPU_THREADS
    /* This is done when we are asked to save and restore the CP14 debug context
     * of VCPU threads; the register context is saved into the underlying TCB.
     */
    saveAllBreakpointState(vcpu->vcpuTCB);
#endif
    isb();
#ifdef CONFIG_HAVE_FPU
    /* Other FPU registers are still lazily saved and restored when
     * handleFPUFault is called. See the comments in vcpu_enable
     * for more information.
     */
    if (active && nativeThreadUsingFPU(vcpu->vcpuTCB)) {
        access_fpexc(vcpu, false);
    }
#endif
}

static inline void vcpu_enable(vcpu_t *vcpu)
{
    vcpu_restore_reg(vcpu, seL4_VCPUReg_SCTLR);
    setHCR(HCR_VCPU);
    isb();

    /* Turn on the VGIC */
    set_gic_vcpu_ctrl_hcr(vcpu->vgic.hcr);

#if !defined(ARM_CP14_SAVE_AND_RESTORE_NATIVE_THREADS) && defined(ARM_HYP_CP14_SAVE_AND_RESTORE_VCPU_THREADS)
    /* This is guarded by an #ifNdef (negation) ARM_CP14_SAVE_AND_RESTORE_NATIVE_THREADS
     * because if it wasn't, we'd be calling restore_user_debug_context twice
     * on a debug-API build; recall that restore_user_debug_context is called
     * in restore_user_context.
     *
     * We call restore_user_debug_context here, because vcpu_restore calls this
     * function (vcpu_enable). It's better to embed the
     * restore_user_debug_context call in here than to call it in the outer
     * level caller (vcpu_switch), because if the structure of this VCPU code
     * changes later on, it will be less likely that the person who changes
     * the code will be able to omit the debug register context restore, if
     * it's done here.
     */
    restore_user_debug_context(vcpu->vcpuTCB);
#endif
#if defined(ARM_HYP_TRAP_CP14_IN_NATIVE_USER_THREADS)
    /* Disable debug exception trapping and let the PL1 Guest VM handle all
     * of its own debug faults.
     */
    setHDCRTrapDebugExceptionState(false);
#endif
#ifdef CONFIG_HAVE_FPU
    /* We need to restore the FPEXC value early for the following reason:
     *
     * When an application inside a VM tries to execute an FPU instruction
     * while the EN bit of FPEXC is disabled, an undefined instruction
     * exception is delivered to the guest Linux kernel rather than to seL4.
     * Only when the Linux kernel examines the EN bit of FPEXC, to determine
     * whether the exception is FPU related, is a VCPU trap sent to the seL4
     * kernel. By then it can be too late to restore the saved FPEXC value in
     * the VCPU trap handler: if the EN bit of the saved FPEXC is enabled, the
     * Linux kernel thinks the FPU is enabled and refuses to handle the
     * exception, so the application is killed due to an illegal instruction.
     *
     * Note that we restore the FPEXC here, but the current FPU owner
     * can be a different thread. Thus, it seems that we are modifying
     * another thread's FPEXC. However, the modification is OK.
     *
     * 1: If the other thread is a native thread, even if the EN bit of
     * the FPEXC is enabled, a trap to HYP mode will be triggered when
     * the thread tries to use the FPU.
     *
     * 2: If the other thread has a VCPU, the FPEXC is already saved
     * in the VCPU's vcpu->fpexc when the VCPU is saved or disabled.
     *
     * We also overwrite the fpuState.fpexc with the value saved in
     * vcpu->fpexc, since the following scenario can happen:
     *
     * VM0 (the FPU owner) -> VM1 (updates the FPEXC in vcpu_enable) ->
     * switchLocalFpuOwner (saves VM0 with the modified FPEXC) ->
     * VM1 (the new FPU owner)
     *
     * In the case above, the fpuState.fpexc of VM0 holds the value written
     * by VM1, but the vcpu->fpexc of VM0 still contains the correct value
     * when VM0 is disabled (vcpu_disable) or saved (vcpu_save).
     */

    vcpu->vcpuTCB->tcbArch.tcbContext.fpuState.fpexc = vcpu_read_reg(vcpu, seL4_VCPUReg_FPEXC);
    access_fpexc(vcpu, true);
#endif
    /* Restore virtual timer state */
    restore_virt_timer(vcpu);
}

static inline void vcpu_disable(vcpu_t *vcpu)
{
    uint32_t hcr;
    dsb();
    if (likely(vcpu)) {
        hcr = get_gic_vcpu_ctrl_hcr();
        vcpu->vgic.hcr = hcr;
        vcpu_save_reg(vcpu, seL4_VCPUReg_SCTLR);
        isb();
#ifdef CONFIG_HAVE_FPU
        if (nativeThreadUsingFPU(vcpu->vcpuTCB)) {
            access_fpexc(vcpu, false);
        }
#endif
    }
    /* Turn off the VGIC */
    set_gic_vcpu_ctrl_hcr(0);
    isb();

    /* Stage 1 MMU off */
    setSCTLR(SCTLR_DEFAULT);
    setHCR(HCR_NATIVE);

#if defined(ARM_HYP_CP14_SAVE_AND_RESTORE_VCPU_THREADS)
    /* Disable all breakpoint registers from triggering their
     * respective events, so that when we switch from a guest VM
     * to a native thread, the native thread won't trigger events
     * that were caused by things the guest VM did.
     */
    loadAllDisabledBreakpointState();
#endif
#if defined(ARM_HYP_TRAP_CP14_IN_NATIVE_USER_THREADS)
    /* Enable debug exception trapping and let seL4 trap all PL0 (user) native
     * seL4 threads' debug exceptions, so it can deliver them as fault messages.
     */
    setHDCRTrapDebugExceptionState(true);
#endif
    isb();
    if (likely(vcpu)) {
        /* Save virtual timer state */
        save_virt_timer(vcpu);
        /* Mask the virtual timer interrupt */
        maskInterrupt(true, CORE_IRQ_TO_IRQT(CURRENT_CPU_INDEX(), INTERRUPT_VTIMER_EVENT));
    }
}

static inline void armv_vcpu_init(vcpu_t *vcpu)
{
    vcpu_write_reg(vcpu, seL4_VCPUReg_SCTLR, SCTLR_DEFAULT);
    vcpu_write_reg(vcpu, seL4_VCPUReg_ACTLR, ACTLR_DEFAULT);
}

#define HSR_FPU_FAULT   (0x1fe0000a)
#define HSR_TASE_FAULT  (0x1fe00020)
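/* Both values encode exception class 0x07 in HSR[31:26] (a coprocessor access
 * trapped by HCPTR); the remaining ISS bits are what distinguish a plain
 * VFP/FPU access from an Advanced SIMD (TASE) one. */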

static inline bool_t armv_handleVCPUFault(word_t hsr)
{
#ifdef CONFIG_HAVE_FPU
    if (hsr == HSR_FPU_FAULT || hsr == HSR_TASE_FAULT) {
        assert(!isFpuEnable());
        handleFPUFault();
        setNextPC(NODE_STATE(ksCurThread), getRestartPC(NODE_STATE(ksCurThread)));
        return true;
    }
#endif

    return false;
}

#endif /* End of CONFIG_ARM_HYPERVISOR_SUPPORT */