// Copyright 2017 The Fuchsia Authors
//
// Use of this source code is governed by a MIT-style
// license that can be found in the LICENSE file or at
// https://opensource.org/licenses/MIT

#include <arch/arm64/el2_state.h>
#include <arch/arm64/mmu.h>
#include <arch/asm_macros.h>
#include <asm.h>
#include <zircon/errors.h>

#define CNTHCTL_EL2_EL1PCTEN            BIT_32(0)
#define CNTHCTL_EL2_EL1PCEN             BIT_32(1)

#define CPTR_EL2_TFP_SHIFT              10
#define CPTR_EL2_TFP                    BIT_32(CPTR_EL2_TFP_SHIFT)
#define CPTR_EL2_RES1                   0x33ff

#define ESR_EL2_EC_FP                   0x07
#define ESR_EL2_EC_HVC                  0x16
#define ESR_EL2_EC_SHIFT                26
#define ESR_EL2_ISS_MASK                0x01ffffff

#define XTCR_EL2_PS_SHIFT               16

#define MDCR_EL2_TDOSA                  BIT_32(10)

// NOTE(abdulla): This excludes the top bit, as it is too large for VTCR_EL2.PS.
#define ID_AA64MMFR0_EL1_PARANGE_MASK   0x07

.section .text.el2,"ax",@progbits
.align 12

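// Saves (stp) or restores (ldp) the full SIMD/FP register file, along with
// FPSR and FPCR, at \off bytes from X9. Preserves X9 and clobbers X10.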
.macro fp_state inst, off
    add x9, x9, \off
    \inst q0, q1, [x9, FS_Q(0)]
    \inst q2, q3, [x9, FS_Q(2)]
    \inst q4, q5, [x9, FS_Q(4)]
    \inst q6, q7, [x9, FS_Q(6)]
    \inst q8, q9, [x9, FS_Q(8)]
    \inst q10, q11, [x9, FS_Q(10)]
    \inst q12, q13, [x9, FS_Q(12)]
    \inst q14, q15, [x9, FS_Q(14)]
    \inst q16, q17, [x9, FS_Q(16)]
    \inst q18, q19, [x9, FS_Q(18)]
    \inst q20, q21, [x9, FS_Q(20)]
    \inst q22, q23, [x9, FS_Q(22)]
    \inst q24, q25, [x9, FS_Q(24)]
    \inst q26, q27, [x9, FS_Q(26)]
    \inst q28, q29, [x9, FS_Q(28)]
    \inst q30, q31, [x9, FS_Q(30)]
.ifc "\inst", "ldp"
    ldr x10, [x9, FS_FPSR]
    msr fpsr, x10
    ldr x10, [x9, FS_FPCR]
    msr fpcr, x10
.else
    mrs x10, fpsr
    str x10, [x9, FS_FPSR]
    mrs x10, fpcr
    str x10, [x9, FS_FPCR]
.endif
    sub x9, x9, \off
.endm

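// Loads (ldr) or stores (str) a single system register at \off bytes from X10.
// Clobbers X11.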
.macro system_register inst, off, sysreg
.ifc "\inst", "ldr"
    ldr x11, [x10, \off]
    msr \sysreg, x11
.else
    mrs x11, \sysreg
    str x11, [x10, \off]
.endif
.endm

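// Loads or stores the EL0/EL1 system register state, plus selected EL2
// registers, at \off bytes from X9. Clobbers X10 and X11.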
.macro system_state inst, off
    add x10, x9, \off
    system_register \inst, SS_SP_EL0, sp_el0
    system_register \inst, SS_TPIDR_EL0, tpidr_el0
    system_register \inst, SS_TPIDRRO_EL0, tpidrro_el0

    system_register \inst, SS_CNTKCTL_EL1, cntkctl_el1
    system_register \inst, SS_CONTEXTIDR_EL1, contextidr_el1
    system_register \inst, SS_CPACR_EL1, cpacr_el1
    system_register \inst, SS_CSSELR_EL1, csselr_el1
    system_register \inst, SS_ELR_EL1, elr_el1
    system_register \inst, SS_ESR_EL1, esr_el1
    system_register \inst, SS_FAR_EL1, far_el1
    system_register \inst, SS_MAIR_EL1, mair_el1
    system_register \inst, SS_MDSCR_EL1, mdscr_el1
    system_register \inst, SS_PAR_EL1, par_el1
    system_register \inst, SS_SCTLR_EL1, sctlr_el1
    system_register \inst, SS_SP_EL1, sp_el1
    system_register \inst, SS_SPSR_EL1, spsr_el1
    system_register \inst, SS_TCR_EL1, tcr_el1
    system_register \inst, SS_TPIDR_EL1, tpidr_el1
    system_register \inst, SS_TTBR0_EL1, ttbr0_el1
    system_register \inst, SS_TTBR1_EL1, ttbr1_el1
    system_register \inst, SS_VBAR_EL1, vbar_el1

    system_register \inst, SS_ELR_EL2, elr_el2
    system_register \inst, SS_SPSR_EL2, spsr_el2
    system_register \inst, SS_VMPIDR_EL2, vmpidr_el2
.endm

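// Saves or restores host registers X18 through X30 at \off bytes from X9.
// Clobbers X10.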
.macro host_state inst, off
    add x10, x9, \off
.ifc "\inst", "ldp"
    ldr x18, [x10, HS_X(0)]
.else
    str x18, [x10, HS_X(0)]
.endif
    \inst x19, x20, [x10, HS_X(1)]
    \inst x21, x22, [x10, HS_X(3)]
    \inst x23, x24, [x10, HS_X(5)]
    \inst x25, x26, [x10, HS_X(7)]
    \inst x27, x28, [x10, HS_X(9)]
    \inst x29, x30, [x10, HS_X(11)]
.endm

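// Saves or restores guest registers X0 through X7 and X10 through X30. X8 and
// X9 are handled separately by guest_x9_state.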
.macro guest_state inst
    \inst x0, x1, [x9, GS_X(0)]
    \inst x2, x3, [x9, GS_X(2)]
    \inst x4, x5, [x9, GS_X(4)]
    \inst x6, x7, [x9, GS_X(6)]
    \inst x10, x11, [x9, GS_X(10)]
    \inst x12, x13, [x9, GS_X(12)]
    \inst x14, x15, [x9, GS_X(14)]
    \inst x16, x17, [x9, GS_X(16)]
    \inst x18, x19, [x9, GS_X(18)]
    \inst x20, x21, [x9, GS_X(20)]
    \inst x22, x23, [x9, GS_X(22)]
    \inst x24, x25, [x9, GS_X(24)]
    \inst x26, x27, [x9, GS_X(26)]
    \inst x28, x29, [x9, GS_X(28)]
.ifc "\inst", "ldp"
    ldr x30, [x9, GS_X(30)]
.else
    str x30, [x9, GS_X(30)]
.endif
.endm

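// Saves or restores guest X8, along with \reg, which holds the guest's X9.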
.macro guest_x9_state inst, reg
    \inst x8, \reg, [x9, GS_X(8)]
.endm

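// Restores the guest's virtual timer compare value and control registers.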
.macro guest_enter_state
    ldr x10, [x9, GS_CNTV_CVAL_EL0]
    msr cntv_cval_el0, x10
    ldr x10, [x9, GS_CNTV_CTL_EL0]
    msr cntv_ctl_el0, x10
.endm

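// Saves the guest's virtual timer state, along with the exception syndrome,
// fault address, and faulting IPA from EL2.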
.macro guest_exit_state
    mrs x10, cntv_ctl_el0
    str x10, [x9, GS_CNTV_CTL_EL0]
    mrs x10, cntv_cval_el0
    str x10, [x9, GS_CNTV_CVAL_EL0]
    mrs x10, esr_el2
    str x10, [x9, GS_ESR_EL2]
    mrs x10, far_el2
    str x10, [x9, GS_FAR_EL2]
    mrs x10, hpfar_el2
    // This is not described well in the manual, but HPFAR_EL2 does not contain
    // the lower 8 bits of the IPA, so it must be shifted.
    lsl x10, x10, 8
    str x10, [x9, GS_HPFAR_EL2]
.endm

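// Installs the guest's stage-2 translation table from X0.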
.macro switch_to_guest
    msr vttbr_el2, x0
    isb
.endm

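// Clears VTTBR_EL2. A zero VTTBR_EL2 indicates that we are running on behalf
// of the host.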
.macro switch_to_host
    msr vttbr_el2, xzr
    isb
.endm

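// Returns to the caller's exception level with \literal in X0 as the status.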
.macro exception_return literal
    mov x0, \literal
    eret
.endm

.macro pop_stack
    add sp, sp, 16
.endm

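// Checks that the exception was an in-range HVC, and branches through \table,
// which contains \size entries. Returns ZX_ERR_INVALID_ARGS otherwise.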
.macro hvc_jump table size
    mrs x9, esr_el2

    // Check ESR_EL2.EC to determine what caused the exception.
    lsr x10, x9, ESR_EL2_EC_SHIFT
    cmp x10, ESR_EL2_EC_HVC
    b.ne .Linvalid_args_for_\table

    // Check ESR_EL2.ISS to determine whether the HVC index is in range.
    and x10, x9, ESR_EL2_ISS_MASK
    cmp x10, \size
    b.ge .Linvalid_args_for_\table

    // Branch to the jump table.
    adr x9, \table
    add x9, x9, x10, lsl 2
    br x9

.Linvalid_args_for_\table:
    exception_return ZX_ERR_INVALID_ARGS
.endm

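// Starts a guest exit by saving X9 and \return_code on the EL2 stack.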
.macro guest_exit return_code
    // We push X9 onto the stack so we have one scratch register. We only use
    // X9 here, so that we don't accidentally trample the guest state.
    str x9, [sp, -16]!
    mov x9, \return_code
    str x9, [sp, 8]
.endm

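// Synchronous exception entry for the init vector table: dispatch the HVCs
// used to set up EL2.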
.macro entry_init
.align 7
    hvc_jump .Linit_table 4
.Linit_table:
    b el2_hvc_psci
    b el2_hvc_mexec
    b el2_hvc_on
    b el2_hvc_tlbi
.endm

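// Synchronous exception entry for the exec vector table: either complete a
// guest exit (or a lazy floating-point switch), or dispatch a host HVC.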
.macro entry_sync return_code
.align 7
    guest_exit \return_code

    // Check VTTBR_EL2 to determine whether the exception came from the guest or
    // from the host.
    mrs x9, vttbr_el2
    cbnz x9, el2_guest_exit_or_fp_resume

    // The exception came from the host, so there is no guest state to preserve.
    pop_stack

    // If we got here, the exception came from the host or EL2. Continue
    // execution through a jump table based on the HVC index.
    hvc_jump .Lsync_table 6
.Lsync_table:
    b el2_hvc_psci
    b el2_hvc_mexec
    b el2_hvc_off
    b el2_hvc_tlbi
    b el2_hvc_resume
    b el2_hvc_sysreg
.endm

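// IRQ entry for the exec vector table: exit the guest with \return_code.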
.macro entry_irq return_code
.align 7
    guest_exit \return_code
    b el2_guest_exit
.endm

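// Entry for exceptions we never expect to take: reset the system.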
.macro entry_invalid_exception
.align 7
    // If we got here, the exception came from the host or EL2. Something
    // unexpected happened, so we reset.
    mov x0, xzr
    b psci_system_reset
.endm

// We have two vector tables that we switch between, init and exec. The reason
// is that we need to use the stack to temporarily save registers when we exit
// from a guest. However, that stack may not have been set up, and therefore we
// cannot unconditionally use it. We use the init vector table to set up the
// stack and hypervisor state, and we use the exec vector table to maintain
// execution of the hypervisor.

.align 10
FUNCTION_LABEL(arm64_el2_init_table)
    /* exceptions from current EL, using SP0 */
    entry_invalid_exception
    entry_invalid_exception
    entry_invalid_exception
    entry_invalid_exception

    /* exceptions from current EL, using SPx */
    entry_invalid_exception
    entry_invalid_exception
    entry_invalid_exception
    entry_invalid_exception

    /* exceptions from lower EL, running arm64 */
    entry_init
    entry_invalid_exception
    entry_invalid_exception
    entry_invalid_exception

    /* exceptions from lower EL, running arm32 */
    entry_invalid_exception
    entry_invalid_exception
    entry_invalid_exception
    entry_invalid_exception

.align 10
FUNCTION_LABEL(arm64_el2_exec_table)
    /* exceptions from current EL, using SP0 */
    entry_invalid_exception
    entry_invalid_exception
    entry_invalid_exception
    entry_invalid_exception

    /* exceptions from current EL, using SPx */
    entry_invalid_exception
    entry_invalid_exception
    entry_invalid_exception
    entry_invalid_exception

    /* exceptions from lower EL, running arm64 */
    entry_sync ZX_OK
    entry_irq ZX_ERR_NEXT
    entry_invalid_exception
    entry_invalid_exception

    /* exceptions from lower EL, running arm32 */
    entry_invalid_exception
    entry_invalid_exception
    entry_invalid_exception
    entry_invalid_exception

// zx_status_t arm64_el2_on(zx_paddr_t ttbr0, zx_paddr_t stack_top);
//
// |stack_top| must point to the physical address of a contiguous stack.
FUNCTION(arm64_el2_on)
    hvc 2
    ret
END_FUNCTION(arm64_el2_on)
FUNCTION_LABEL(el2_hvc_on)
    // Set up the EL2 translation table.
    msr ttbr0_el2, x0

    // Set up the EL2 stack pointer.
    mov sp, x1

    // Load PARange from ID_AA64MMFR0_EL1.
    mrs x10, id_aa64mmfr0_el1
    and x10, x10, ID_AA64MMFR0_EL1_PARANGE_MASK
    lsl x10, x10, XTCR_EL2_PS_SHIFT

    // Set up the virtualisation translation control.
    movlit x9, MMU_VTCR_EL2_FLAGS
    // Combine MMU_VTCR_EL2_FLAGS with xTCR_EL2.PS.
    orr x9, x9, x10
    msr vtcr_el2, x9

    // Set up the EL2 translation control.
    movlit x9, MMU_TCR_EL2_FLAGS
    // Combine MMU_TCR_EL2_FLAGS with xTCR_EL2.PS.
    orr x9, x9, x10
    msr tcr_el2, x9

    // Set up the EL2 memory attributes.
    movlit x9, MMU_MAIR_VAL
    msr mair_el2, x9
    isb

    // Invalidate all EL2 TLB entries.
    tlbi alle2
    dsb sy

    // Enable the MMU, I-cache, D-cache, and stack alignment checking.
    movlit x9, SCTLR_ELX_M | SCTLR_ELX_C | SCTLR_ELX_SA | SCTLR_ELX_I | SCTLR_EL2_RES1
    msr sctlr_el2, x9
    isb

    // Set up the exec vector table for EL2.
    adr_global x9, arm64_el2_exec_table
    msr vbar_el2, x9

    exception_return ZX_OK

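// Forward a PSCI call to the secure monitor, and return its result to the
// caller.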
FUNCTION_LABEL(el2_hvc_psci)
    smc 0
    eret

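// Branch to the address in X0. This is presumably used by mexec to hand
// control to the new kernel while still in EL2.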
FUNCTION_LABEL(el2_hvc_mexec)
    br x0

// zx_status_t arm64_el2_off();
FUNCTION(arm64_el2_off)
    hvc 2
    ret
END_FUNCTION(arm64_el2_off)
FUNCTION_LABEL(el2_hvc_off)
    // Disable the MMU, but enable I-cache, D-cache, and stack alignment checking.
    movlit x9, SCTLR_ELX_C | SCTLR_ELX_SA | SCTLR_ELX_I | SCTLR_EL2_RES1
    msr sctlr_el2, x9
    isb

    // Invalidate all EL2 TLB entries.
    tlbi alle2
    isb

    // Set up the init vector table for EL2.
    adr_global x9, arm64_el2_init_table
    msr vbar_el2, x9
    isb

    exception_return ZX_OK

// zx_status_t arm64_el2_tlbi_ipa(zx_paddr_t vttbr, zx_vaddr_t addr, bool terminal);
FUNCTION(arm64_el2_tlbi_ipa)
    mov x7, 0
    hvc 3
    ret
END_FUNCTION(arm64_el2_tlbi_ipa)

// zx_status_t arm64_el2_tlbi_vmid(zx_paddr_t vttbr);
FUNCTION(arm64_el2_tlbi_vmid)
    mov x7, 1
    hvc 3
    ret
END_FUNCTION(arm64_el2_tlbi_vmid)

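// Dispatch a TLB invalidation HVC: X7 selects invalidation by IPA (0) or by
// VMID (1).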
FUNCTION_LABEL(el2_hvc_tlbi)
    switch_to_guest

    cbz x7, el2_hvc_tlbi_ipa
    b el2_hvc_tlbi_vmid

.Ltlbi_exit:
    switch_to_host
    exception_return ZX_OK

FUNCTION_LABEL(el2_hvc_tlbi_ipa)
    // TLBI IPAS2* instructions take bits [51:12] of the IPA.
    lsr x1, x1, 12

    // Invalidate IPA. Based on ARM DEN 0024A, page 12-5.
    dsb ishst
    cbnz x2, .Lterminal
    tlbi ipas2e1is, x1
    b .Lsync
.Lterminal:
    tlbi ipas2le1is, x1
.Lsync:
    dsb ish
    tlbi vmalle1is
    dsb ish
    isb
    b .Ltlbi_exit

FUNCTION_LABEL(el2_hvc_tlbi_vmid)
    // Invalidate VMID. Based on ARM DEN 0024A, page 12-5.
    dsb ishst
    tlbi vmalls12e1is
    dsb ish
    isb
    b .Ltlbi_exit

// zx_status_t arm64_el2_resume(zx_paddr_t vttbr, zx_paddr_t state, uint64_t hcr);
FUNCTION(arm64_el2_resume)
    hvc 4
    ret
END_FUNCTION(arm64_el2_resume)
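// Enter the guest using the stage-2 table in X0, the El2State in X1, and the
// HCR_EL2 value in X2.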
FUNCTION_LABEL(el2_hvc_resume)
    switch_to_guest

    // Save El2State into tpidr_el2.
    msr tpidr_el2, x1
    mov x9, x1

    // If the guest is being run for the first time, invalidate all VMID TLB
    // entries in case the VMID has been used previously.
    ldr x10, [x9, ES_RESUME]
    cbnz x10, .Lresume
    tlbi vmalle1
    mov x10, 1
    str x10, [x9, ES_RESUME]
    dsb nshst

.Lresume:
    // Set the hypervisor control register.
    msr hcr_el2, x2

    // Allow the guest to access the physical counter, but not the physical timer.
    mov x10, CNTHCTL_EL2_EL1PCTEN
    msr cnthctl_el2, x10

    // Trap guest accesses to debug-related registers.
    mov x10, MDCR_EL2_TDOSA
    msr mdcr_el2, x10

    // Enable floating-point traps.
    movlit x10, CPTR_EL2_RES1 | CPTR_EL2_TFP
    msr cptr_el2, x10
    isb

    host_state stp, HS_X18
    system_state str, HS_SYSTEM_STATE
    guest_enter_state
    system_state ldr, GS_SYSTEM_STATE
    guest_state ldp
    guest_x9_state ldp, x9

    // Return to guest.
    eret

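// The guest took an exception: either perform a lazy floating-point switch and
// resume, or fall through to a full guest exit.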
FUNCTION_LABEL(el2_guest_exit_or_fp_resume)
    // Check ESR_EL2.EC to determine whether the exception was due to a
    // floating-point trap.
    mrs x9, esr_el2
    lsr x9, x9, ESR_EL2_EC_SHIFT
    cmp x9, ESR_EL2_EC_FP
    b.eq el2_fp_resume

FUNCTION_LABEL(el2_guest_exit)
    // Load El2State from tpidr_el2.
    mrs x9, tpidr_el2

    guest_state stp
    // Load X9 from the stack, and save it in GuestState.
    ldr x10, [sp]
    guest_x9_state stp, x10
    system_state str, GS_SYSTEM_STATE
    guest_exit_state
    system_state ldr, HS_SYSTEM_STATE
    host_state ldp, HS_X18

    mrs x10, cptr_el2
    tbnz x10, CPTR_EL2_TFP_SHIFT, .Lfp_untrap

    // The floating-point trap was taken, so the guest modified floating-point
    // state. Save it, and restore the host state.
    fp_state stp, GS_FP_STATE
    fp_state ldp, HS_FP_STATE
    b .Lfp_done

.Lfp_untrap:
    // Disable floating-point traps.
    mov x10, CPTR_EL2_RES1
    msr cptr_el2, x10

.Lfp_done:
    // Disable the virtual timer set by the guest, and re-enable access to the
    // physical counter and timer.
    msr cntv_ctl_el0, xzr
    mov x10, CNTHCTL_EL2_EL1PCTEN | CNTHCTL_EL2_EL1PCEN
    msr cnthctl_el2, x10

    // Don't trap debug register accesses to EL2.
    msr mdcr_el2, xzr

    // Disable guest traps, and ensure EL1 is arm64.
    mov x10, HCR_EL2_RW
    msr hcr_el2, x10
    isb

    switch_to_host

    // Return to host.
    ldr x0, [sp, 8]
    pop_stack
    eret

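// Lazily switch the floating-point state from host to guest, and resume the
// guest without a full exit.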
FUNCTION_LABEL(el2_fp_resume)
    // Save x10 so we have an extra register for floating-point state swapping.
    // We're returning to the guest so we don't need the return code in [sp, 8].
    str x10, [sp, 8]

    // Disable floating-point traps and reset exception syndrome.
    mov x9, CPTR_EL2_RES1
    msr cptr_el2, x9
    msr esr_el2, xzr
    isb

    // Load El2State from tpidr_el2.
    mrs x9, tpidr_el2

    fp_state stp, HS_FP_STATE
    fp_state ldp, GS_FP_STATE

    // Return to guest.
    ldp x9, x10, [sp], 16
    eret