1/*
2 * Copyright 2020, Data61, CSIRO (ABN 41 687 119 230)
3 *
4 * SPDX-License-Identifier: GPL-2.0-only
5 */
6
7#pragma once
8
9#include <config.h>
10#include <types.h>
11#include <api/failures.h>
12#include <object/structures.h>
13
/* In this mode, activating the "global PD" simply activates the kernel vspace. */
#define activate_global_pd activate_kernel_vspace
/* Number of mode-specific reserved regions (none; see mode_reserved_region). */
#define MODE_RESERVED 0
16
/* The VTABLE_VMID_SLOT in a user-level application's vspace root
 * is reserved for storing its allocated 8-bit hardware VMID
 * when running in EL2. Note that this assumes that the IPA size for the S2
 * translation and the VA size for the S1 translation do not use the full
 * 48 bits. Please see the definition of seL4_UserTop for details.
 */
23#define VTABLE_VMID_SLOT   MASK(seL4_VSpaceIndexBits)
24
/* The VTABLE_SMMU_SLOT in a user-level application's vspace root is reserved
 * for storing the number of context banks bound to this vspace when the
 * SMMU feature is enabled. This assumes that the user-level address space does
 * not use the second-to-last entry in the vspace root, which is guaranteed by
 * seL4_UserTop.
 */
31#define VTABLE_SMMU_SLOT   (MASK(seL4_VSpaceIndexBits) - 1)
32
33/* ==================== BOOT CODE FINISHES HERE ==================== */
34
/* True iff the cap is a vspace ("vtable") root cap / a valid native root cap. */
bool_t CONST isVTableRoot(cap_t cap);
bool_t CONST isValidNativeRoot(cap_t cap);

/* Lookup helpers for intermediate translation tables: presumably return the
 * parent-table slot in which the given table is mapped at (asid, vaddr), or
 * NULL when it is not mapped — confirm against the definitions in vspace.c. */
pgde_t *pageUpperDirectoryMapped(asid_t asid, vptr_t vaddr, pude_t *pud);
pude_t *pageDirectoryMapped(asid_t asid, vptr_t vaddr, pde_t *pd);
/* Remove the given intermediate table from the vspace identified by asid. */
void unmapPageUpperDirectory(asid_t asid, vptr_t vaddr, pude_t *pud);
void unmapPageDirectory(asid_t asid, vptr_t vaddr, pde_t *pd);

void unmapPageTable(asid_t asid, vptr_t vaddr, pte_t *pt);
/* Unmap a frame of the given size at (asid, vptr); pptr identifies the frame
 * so a stale mapping of a different frame is left alone — TODO confirm. */
void unmapPage(vm_page_size_t page_size, asid_t asid, vptr_t vptr, pptr_t pptr);

/* Invalidate a whole ASID pool / a single ASID and its vspace binding. */
void deleteASIDPool(asid_t base, asid_pool_t *pool);
void deleteASID(asid_t asid, vspace_root_t *vspace);
#ifdef CONFIG_ARM_HYPERVISOR_SUPPORT
/* Map the seL4 ASID to its hardware ASID/VMID (allocation policy lives in
 * the implementation — see vspace.c). */
hw_asid_t getHWASID(asid_t asid);
#endif
51
/* Table of mode-specific reserved regions — empty in this mode (matching
 * MODE_RESERVED == 0). The empty table is spelled per compiler: clang
 * accepts a zero-length array initializer, while for GCC a NULL pointer is
 * used instead. NOTE(review): presumably GCC rejects the empty initializer
 * form used for clang — confirm before unifying. */
#ifdef __clang__
static const region_t BOOT_RODATA mode_reserved_region[] = {};
#else
static const region_t BOOT_RODATA *mode_reserved_region = NULL;
#endif
57
#ifdef AARCH64_VSPACE_S2_START_L1

/* When stage-2 translation starts at level 1, the vspace root is a page
 * upper directory (PUD), so the generic "vtable root" accessors alias the
 * PUD cap accessors. */
#define cap_vtable_root_cap cap_page_upper_directory_cap
#define cap_vtable_root_get_mappedASID(_c) \
    cap_page_upper_directory_cap_get_capPUDMappedASID(_c)
#define cap_vtable_root_get_basePtr(_c) \
    VSPACE_PTR(cap_page_upper_directory_cap_get_capPUDBasePtr(_c))
#define cap_vtable_root_isMapped(_c) \
    cap_page_upper_directory_cap_get_capPUDIsMapped(_c)

#ifdef CONFIG_ARM_SMMU
/* With SMMU enabled the cap additionally records a context bank (CB);
 * freshly minted caps start unbound (CB_INVALID). */
#define cap_vtable_root_get_mappedCB(_c) \
    cap_page_upper_directory_cap_get_capPUDMappedCB(_c)
#define cap_vtable_root_ptr_set_mappedCB(_c, cb) \
    cap_page_upper_directory_cap_ptr_set_capPUDMappedCB(_c, cb)
#define cap_vtable_cap_new(_a, _v, _m) cap_page_upper_directory_cap_new(_a, _v, _m, 0, CB_INVALID)
#define vtable_invalid_new(_a, _v) pude_pude_invalid_new(_a, _v, 0)
/* Invalid PUDE used only as storage for a context-bank bind count. */
#define vtable_invalid_smmu_new(_cb) pude_pude_invalid_new(0, false, _cb)
#define vtable_invalid_get_bind_cb(_v) \
    pude_pude_invalid_get_bind_cb(_v)
#else
#define cap_vtable_cap_new(_a, _v, _m) cap_page_upper_directory_cap_new(_a, _v, _m, 0)
#define vtable_invalid_new(_a, _v) pude_pude_invalid_new(_a, _v)
#endif  /*!CONFIG_ARM_SMMU*/

/* Invalid PUDEs double as storage for a (validity-tagged) hardware ASID. */
#define vtable_invalid_get_stored_asid_valid(_v) \
    pude_pude_invalid_get_stored_asid_valid(_v)
#define vtable_invalid_get_stored_hw_asid(_v) pude_pude_invalid_get_stored_hw_asid(_v)
86
87static inline exception_t performASIDPoolInvocation(asid_t asid, asid_pool_t *poolPtr, cte_t *cte)
88{
89    cap_page_upper_directory_cap_ptr_set_capPUDMappedASID(&cte->cap, asid);
90    cap_page_upper_directory_cap_ptr_set_capPUDIsMapped(&cte->cap, 1);
91    poolPtr->array[asid & MASK(asidLowBits)] =
92        PUDE_PTR(cap_page_upper_directory_cap_get_capPUDBasePtr(cte->cap));
93#ifdef CONFIG_ARM_SMMU
94    vspace_root_t *vtable = poolPtr->array[asid & MASK(asidLowBits)];
95    vtable[VTABLE_SMMU_SLOT] = vtable_invalid_smmu_new(0);
96#endif
97    return EXCEPTION_NONE;
98}
99
100
101#else
102
/* Otherwise the vspace root is a page global directory (PGD), so the
 * generic "vtable root" accessors alias the PGD cap accessors. */
#define cap_vtable_root_cap cap_page_global_directory_cap
#define cap_vtable_root_get_mappedASID(_c) \
    cap_page_global_directory_cap_get_capPGDMappedASID(_c)
#define cap_vtable_root_get_basePtr(_c) \
    PGDE_PTR(cap_page_global_directory_cap_get_capPGDBasePtr(_c))
#define cap_vtable_root_isMapped(_c) cap_page_global_directory_cap_get_capPGDIsMapped(_c)

#ifdef CONFIG_ARM_SMMU
/* With SMMU enabled the cap additionally records a context bank (CB);
 * freshly minted caps start unbound (CB_INVALID). */
#define cap_vtable_root_get_mappedCB(_c) \
    cap_page_global_directory_cap_get_capPGDMappedCB(_c)
#define cap_vtable_root_ptr_set_mappedCB(_c, cb) \
    cap_page_global_directory_cap_ptr_set_capPGDMappedCB(_c, cb)
#define cap_vtable_cap_new(_a, _v, _m) \
    cap_page_global_directory_cap_new(_a, _v, _m, CB_INVALID)
#define vtable_invalid_new(_a, _v) pgde_pgde_invalid_new(_a, _v, 0)
/* Invalid PGDE used only as storage for a context-bank bind count. */
#define vtable_invalid_smmu_new(_cb) pgde_pgde_invalid_new(0, false, _cb)
#define vtable_invalid_get_bind_cb(_v) \
    pgde_pgde_invalid_get_bind_cb(_v)
#else
#define cap_vtable_cap_new(_a, _v, _m) \
    cap_page_global_directory_cap_new(_a, _v, _m)
#define vtable_invalid_new(_a, _v) pgde_pgde_invalid_new(_a, _v)
#endif /*!CONFIG_ARM_SMMU*/

/* Invalid PGDEs double as storage for a (validity-tagged) hardware ASID. */
#define vtable_invalid_get_stored_asid_valid(_v) \
    pgde_pgde_invalid_get_stored_asid_valid(_v)
#define vtable_invalid_get_stored_hw_asid(_v) pgde_pgde_invalid_get_stored_hw_asid(_v)
130
131static inline exception_t performASIDPoolInvocation(asid_t asid, asid_pool_t *poolPtr, cte_t *cte)
132{
133    cap_page_global_directory_cap_ptr_set_capPGDMappedASID(&cte->cap, asid);
134    cap_page_global_directory_cap_ptr_set_capPGDIsMapped(&cte->cap, 1);
135    poolPtr->array[asid & MASK(asidLowBits)] =
136        PGDE_PTR(cap_page_global_directory_cap_get_capPGDBasePtr(cte->cap));
137
138#ifdef CONFIG_ARM_SMMU
139    vspace_root_t *vtable = poolPtr->array[asid & MASK(asidLowBits)];
140    vtable[VTABLE_SMMU_SLOT] = vtable_invalid_smmu_new(0);
141#endif
142    return EXCEPTION_NONE;
143}
144
145void increaseASIDBindCB(asid_t asid);
146void decreaseASIDBindCB(asid_t asid);
147
148#endif
149