1/*
2 * Copyright 2020, Data61, CSIRO (ABN 41 687 119 230)
3 *
4 * SPDX-License-Identifier: GPL-2.0-only
5 */
6
7#include <config.h>
8#include <util.h>
9#include <api/types.h>
10#include <arch/types.h>
11#include <arch/model/statedata.h>
12#include <arch/object/structures.h>
13#include <linker.h>
14#include <plat/machine/hardware.h>
15
16#ifdef CONFIG_ARM_SMMU
17#include <arch/object/smmu.h>
18#endif
19
20
/* Table of ASID pools, indexed by the top asidHighBits bits of an ASID. */
asid_pool_t *armKSASIDTable[BIT(asidHighBits)];
22
23/* AArch64 Memory map explanation:
24 *
25 * EL1 and EL2 kernel build vaddrspace layouts:
26 *
27 * On AArch64, the EL1 and EL2 builds of the kernel both use approx 512GiB of
28 * virtual address space.
29 *
30 * The EL1 kernel resides within approx 512 GiB of virtual memory somewhere
31 * within the canonical top (not necessarily the same as the actual top, but all
32 * the unused high bits of kernel virtual addresses are set to 1) of every
33 * user VSpace.
34 *
35 * The EL2 kernel resides within approx 512 GiB of virtual memory somewhere
36 * within the canonical bottom (all the unused high bits are set to 0) of its
37 * own separate virtual address space.
38 *
 * Common AArch64 address space layout:
40 *
41 * The reason why 512 GiB was chosen is because assuming a 48-bit virtual
42 * address space using a 4KiB Translation Granule (and therefore, 4 levels of
43 * page tables):
44 *
45 * One top-level page-structure entry maps 512 GiB, so by limiting ourselves to
46 * 512 GiB, we only need to pre-allocate 1 level0 table (lvl0 is the top level),
47 * 1 level1 table, 512 level2 tables (with each level2 entry mapping 2MiB), and
48 * skip level3.
49 *
50 * We do maintain a single level3 table however, which is mapped into the last
51 * entry in the last level2 table, such that the last 2MiB are mapped using
52 * 4KiB pages instead of 2MiB pages. The reason for this last 2MiB being mapped
53 * using small pages is because this last 2MiB is where the kernel maps all the
54 * different devices it uses (see map_kernel_devices()). This implies that the
55 * kernel can only have up to approx 512GiB of kernel untypeds.
56 *
57 * If you wish for your AArch64 platform to use more than 512 GiB of virtual
58 * memory, you will need to change the number of pre-allocated page tables below
59 * to be sufficient to contain the mapping you want. And don't forget to update
60 * this comment here afterward.
61 */
62
63/* User vaddrspace layouts:
64 *
65 * For EL2:
66 *
 * A plain-English explanation of the memory layout is that the
 * user address spaces cover the address range from 0x0 to the maximum
 * IPA.
70 *
71 * So for a CPU that can generate 44 bits of IPA/PA (such as the TX1/TX2), user
72 * vaddr spaces will cover 16TiB from 0x0 to 0x00000fff_ffffffff.
73 *
74 * Basically by making the guest physical address spaces 44 bits, the guest
75 * kernels can access all of (what they think is) physical memory, while
76 * allowing us to potentially trap accesses by the guests to physical memory
77 * beyond what the processor can address.
78 *
79 * For EL1:
80 *
81 * The standard canonical-high and canonical-low split using TCR_EL1.TBI
82 * applies.
83 */
84
/* Pre-allocated global translation tables. See the memory-map comment above
 * for why exactly these levels and counts are statically allocated. */

/* Top-level table for user address spaces. NOTE(review): presumably the
 * VSpace installed while no user-supplied one is active — confirm against
 * the context-switch code. */
vspace_root_t armKSGlobalUserVSpace[BIT(seL4_VSpaceIndexBits)] ALIGN_BSS(BIT(seL4_VSpaceBits));
/* The single level0 (PGD) table of the kernel's address space. */
pgde_t armKSGlobalKernelPGD[BIT(PGD_INDEX_BITS)] ALIGN_BSS(BIT(PGD_SIZE_BITS));

/* The single level1 (PUD) table covering the kernel's 512 GiB window. */
pude_t armKSGlobalKernelPUD[BIT(PUD_INDEX_BITS)] ALIGN_BSS(BIT(seL4_PUDBits));
/* One level2 (PD) table per PUD entry; each PD entry maps 2 MiB. */
pde_t armKSGlobalKernelPDs[BIT(PUD_INDEX_BITS)][BIT(PD_INDEX_BITS)] ALIGN_BSS(BIT(seL4_PageDirBits));
/* The single level3 (PT) table, mapped into the last entry of the last PD so
 * that the final 2 MiB (the kernel device window, see map_kernel_devices())
 * is mapped with 4 KiB pages instead of a 2 MiB block. */
pte_t armKSGlobalKernelPT[BIT(PT_INDEX_BITS)] ALIGN_BSS(BIT(seL4_PageTableBits));
91
#ifdef CONFIG_KERNEL_LOG_BUFFER
/* Pointer to the pre-allocated PD entry used to map the kernel log buffer:
 * the second-to-last entry of the last kernel PD. (The very last entry is
 * where the 4 KiB device-mapping page table lives — see the comment above.) */
pde_t *armKSGlobalLogPDE = &armKSGlobalKernelPDs[BIT(PUD_INDEX_BITS) - 1][BIT(PD_INDEX_BITS) - 2];
/* Verify that KS_LOG_PPTR really resolves to the slot chosen above. */
compile_assert(log_pude_is_correct_preallocated_pude,
               GET_PUD_INDEX(KS_LOG_PPTR) == BIT(PUD_INDEX_BITS) - 1);
compile_assert(log_pde_is_correct_preallocated_pde,
               GET_PD_INDEX(KS_LOG_PPTR) == BIT(PD_INDEX_BITS) - 2);
#endif
99
#ifdef CONFIG_ARM_HYPERVISOR_SUPPORT
/* The VCPU whose state is currently resident on the hardware.
 * NOTE(review): presumably NULL when none is — confirm against the VCPU
 * switching code. */
UP_STATE_DEFINE(vcpu_t, *armHSCurVCPU);
/* Whether armHSCurVCPU's state is live/active on the hardware.
 * NOTE(review): inferred from the name — confirm. */
UP_STATE_DEFINE(bool_t, armHSVCPUActive);

/* The hardware VMID to virtual ASID mapping table.
 * ARMv8 supports 8-bit VMIDs, which are used as logical ASIDs
 * when the kernel runs in EL2.
 */
asid_t armKSHWASIDTable[BIT(hwASIDBits)];
/* Next hardware VMID to consider for allocation. */
hw_asid_t armKSNextASID;
#endif
111
#ifdef CONFIG_ARM_SMMU
/* Records the state of created SID (stream ID) caps. */
bool_t smmuStateSIDTable[SMMU_MAX_SID];
/* CNode containing the cb_cap that is assigned to SIDs. */
cte_t smmuStateSIDNode[BIT(SMMU_SID_CNODE_SLOT_BITS)] ALIGN(BIT(SMMU_SID_CNODE_SLOT_BITS + seL4_SlotBits));
compile_assert(smmuStateSIDCNodeSize, sizeof(smmuStateSIDNode) >= ((SMMU_MAX_SID) * sizeof(cte_t)));

/* Records the state of created CB (context bank) caps. */
bool_t smmuStateCBTable[SMMU_MAX_CB];
/* CNode containing the vspace root cap that is assigned to SIDs. */
cte_t smmuStateCBNode[BIT(SMMU_CB_CNODE_SLOT_BITS)] ALIGN(BIT(SMMU_CB_CNODE_SLOT_BITS + seL4_SlotBits));
compile_assert(smmuStateCBCNodeSize, sizeof(smmuStateCBNode) >= ((SMMU_MAX_CB) * sizeof(cte_t)));
/* Records the context bank to ASID relationship. */
asid_t smmuStateCBAsidTable[SMMU_MAX_CB];
#endif
127