/**
 * \file
 * \brief ARMv8 kernel page-table structures.
 */

/*
 * Copyright (c) 2015, ETH Zurich.
 * All rights reserved.
 *
 * This file is distributed under the terms in the attached LICENSE file.
 * If you do not find this file, copies can be found by writing to:
 * ETH Zurich D-INFK, Universitaetstr 6, CH-8092 Zurich. Attn: Systems Group.
 */

#ifndef KERNEL_ARCH_ARMv8_PAGING_H
#define KERNEL_ARCH_ARMv8_PAGING_H

// XXX: Not sure if these includes are required
#include <capabilities.h>
#include <barrelfish_kpi/cpu.h>
#include <barrelfish_kpi/paging_arch.h>
#include <sysreg.h>


// Resolves to the number of Ln entries required to map `limit' bytes.
#define ARMv8_L0_ENTRIES(limit) (VMSAv8_64_L0_BASE((limit) - 1) + 1)
#define ARMv8_L1_ENTRIES(limit) (VMSAv8_64_L1_BASE((limit) - 1) + 1)
#define ARMv8_L2_ENTRIES(limit) (VMSAv8_64_L2_BASE((limit) - 1) + 1)
#define ARMv8_L3_ENTRIES(limit) (VMSAv8_64_L3_BASE((limit) - 1) + 1)
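
// Example (illustrative, not part of the interface): with the usual 4 KiB
// granule VMSAv8-64 layout, an L1 entry covers 1 GiB and an L2 entry covers
// 2 MiB, so:
//
//     ARMv8_L1_ENTRIES(4UL << 30)     // == 4 (L1 entries to cover 4 GiB)
//     ARMv8_L2_ENTRIES(16UL << 20)    // == 8 (L2 entries to cover 16 MiB)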

/**
 * A translation table entry for VMSAv8-64 stage 1.
 */
union armv8_ttable_entry {
    uint64_t raw;
    struct {
        uint64_t        valid       :1;
        uint64_t        mb1         :1;         // 1 -> table, 0 -> block
        uint64_t        ignored1    :10;        // lower block attrs, ignored for table
        uint64_t        base        :36;        // base address of next-level table
        uint64_t        reserved1   :4;
        uint64_t        ignored2    :7;
        uint64_t        pxntable    :1;         // stage 1 only; limits execution at EL1 for subsequent levels
        uint64_t        uxntable    :1;         // stage 1 only; limits execution at EL0 for subsequent levels
        uint64_t        aptable     :2;         // stage 1 only; limits access permissions for subsequent levels
        uint64_t        nstable     :1;         // stage 1 only; security state for subsequent lookups
    } d;
    struct {
        uint64_t        valid       :1;
        uint64_t        mb0         :1;         // 1 -> table, 0 -> block
        uint64_t        attrindex   :3;         // mem attr index field, D4-1798
        uint64_t        ns          :1;         // non-secure bit
        uint64_t        ap          :2;         // access permissions bits
        uint64_t        sh          :2;         // shareability field
        uint64_t        af          :1;         // accessed flag
        uint64_t        ng          :1;         // not global bit
        uint64_t        reserved1   :18;
        uint64_t        base        :18;
        uint64_t        reserved2   :4;
        uint64_t        contiguous  :1;         // hint that entry is part of a set
                                                // of contiguous entries, D4-1811
        uint64_t        pxn         :1;         // privileged execute never bit
        uint64_t        uxn         :1;         // (user) execute never bit
        uint64_t        avail1      :4;         // available for SW use
        uint64_t        ignored1    :5;
    } block_l1;
    struct {
        uint64_t        valid       :1;
        uint64_t        mb0         :1;         // 1 -> table, 0 -> block
        uint64_t        attrindex   :3;         // mem attr index field, D4-1798
        uint64_t        ns          :1;         // non-secure bit
        uint64_t        ap          :2;         // access permissions bits
        uint64_t        sh          :2;         // shareability field
        uint64_t        af          :1;         // accessed flag
        uint64_t        ng          :1;         // not global bit
        uint64_t        reserved1   :9;
        uint64_t        base        :27;
        uint64_t        reserved2   :4;
        uint64_t        contiguous  :1;         // hint that entry is part of a set
                                                // of contiguous entries, D4-1811
        uint64_t        pxn         :1;         // privileged execute never bit
        uint64_t        uxn         :1;         // (user) execute never bit
        uint64_t        avail1      :4;         // available for SW use
        uint64_t        ignored1    :5;
    } block_l2;
    struct {
        uint64_t        valid       :1;
        uint64_t        mb1         :1;         // must be 1 for a page entry; 0 -> invalid
        uint64_t        attrindex   :3;         // mem attr index field, D4-1798
        uint64_t        ns          :1;         // non-secure bit
        uint64_t        ap          :2;         // access permissions bits
        uint64_t        sh          :2;         // shareability field
        uint64_t        af          :1;         // accessed flag
        uint64_t        ng          :1;         // not global bit
        uint64_t        base        :36;
        uint64_t        reserved1   :4;
        uint64_t        contiguous  :1;         // hint that entry is part of a set
                                                // of contiguous entries, D4-1811
        uint64_t        pxn         :1;         // privileged execute never bit
        uint64_t        uxn         :1;         // (user) execute never bit
        uint64_t        avail1      :4;         // available for SW use
        uint64_t        ignored1    :5;
    } page;
};

STATIC_ASSERT_SIZEOF(union armv8_ttable_entry, sizeof(uint64_t));
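
// Example (illustrative sketch, not part of the interface): an L3 page
// descriptor for a normal-memory page can be assembled through the bit-field
// view. `pa' is a hypothetical page-aligned physical address, and attribute
// index 0 is assumed to name normal memory in MAIR_EL1.
//
//     union armv8_ttable_entry e = { .raw = 0 };
//     e.page.valid     = 1;
//     e.page.mb1       = 1;         // bit[1] must be 1 for a page descriptor
//     e.page.attrindex = 0;         // assumed: MAIR_EL1 index 0 = normal memory
//     e.page.sh        = 3;         // inner shareable
//     e.page.af        = 1;         // set the access flag up front
//     e.page.base      = pa >> 12;  // physical frame number (4 KiB granule)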

/**
 * Set up the bootstrap page table with direct and relocated mappings for the
 * kernel.
 *
 * This function does not enable paging.
 *
 * @param initial_base base address of the initial region to map.
 * @param initial_size size of the initial region, in bytes.
 */
void paging_map_kernel(uintptr_t initial_base, size_t initial_size);

lvaddr_t paging_map_device(lpaddr_t base, size_t size);

/**
 * \brief Return whether we have enabled the MMU. Useful for
 * initialization assertions.
 */
extern bool paging_mmu_enabled(void);
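
// For example (illustrative), early initialization code can assert that
// translation is active before touching mapped-only state:
//
//     assert(paging_mmu_enabled());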

/**
 * Maps a device into an L2 page.
 * Assumption: the corresponding L1 entry is already set.
 */
void paging_map_device_page(uintptr_t l1_table,
                            lvaddr_t device_vbase,
                            lpaddr_t device_pbase,
                            size_t device_bytes);
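
// Example (illustrative sketch): mapping one page of a hypothetical UART.
// The table address, virtual base, and physical base below are placeholders,
// not constants defined by this header.
//
//     paging_map_device_page(l1_table_addr,
//                            uart_vbase,      // chosen device virtual address
//                            uart_pbase,      // device physical base
//                            BASE_PAGE_SIZE); // one 4 KiB page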

/**
 * Add kernel mappings to a newly constructed page table.
 *
 * @param base physical address of the newly constructed page table.
 */
void paging_make_good(lpaddr_t base);

void paging_map_table_l0(union armv8_ttable_entry *table_addr, lvaddr_t vaddr, lpaddr_t paddr);

void paging_map_table_l1(union armv8_ttable_entry *table_addr, lvaddr_t vaddr, lpaddr_t paddr);

void paging_map_table_l2(union armv8_ttable_entry *table_addr, lvaddr_t vaddr, lpaddr_t paddr);

void paging_map_block_l1(union armv8_ttable_entry *table_addr, lvaddr_t vaddr, lpaddr_t paddr, uintptr_t flags);

void paging_map_block_l2(union armv8_ttable_entry *table_addr, lvaddr_t vaddr, lpaddr_t paddr, uintptr_t flags);

void paging_map_page_l3(union armv8_ttable_entry *table_addr, lvaddr_t vaddr, lpaddr_t paddr, uintptr_t flags);

void paging_set_l3_entry(union armv8_ttable_entry *l3_entry, lpaddr_t pa, uintptr_t flags);
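
// Example (illustrative sketch of the intended call pattern; `l0', `l1',
// `l1_paddr', `vaddr', `paddr', and `flags' are placeholders): install an L1
// table under the root, then map a 1 GiB block at L1 for the same virtual
// address.
//
//     paging_map_table_l0(l0, vaddr, l1_paddr);     // L0 entry -> L1 table at l1_paddr
//     paging_map_block_l1(l1, vaddr, paddr, flags); // 1 GiB block mapping at L1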

//void paging_set_l2_entry(union armv8_ttable_entry *l2entry, lpaddr_t paddr, uintptr_t flags);
//
//void paging_set_l3_entry(union armv8_ttable_entry *l2entry, lpaddr_t paddr, uintptr_t flags);
//
void paging_context_switch(lpaddr_t table_addr);

void paging_arm_reset(lpaddr_t paddr, size_t bytes);


// REVIEW: [2010-05-04 orion]
// these were deprecated in churn, enabling now to get system running again.

void paging_map_kernel_section(union armv8_ttable_entry *ttbase, lvaddr_t vbase, lpaddr_t pbase);
void paging_map_kernel_l1_block(union armv8_ttable_entry *ttbase, lvaddr_t vbase, lpaddr_t pbase);

void paging_map_memory(union armv8_ttable_entry *ttbase, lpaddr_t paddr, size_t bytes);

static inline bool is_root_pt(enum objtype type) {
    return type == ObjType_VNode_AARCH64_l0;
}

static inline size_t get_pte_size(void) {
    return PTABLE_ENTRY_SIZE;
}

static inline void do_one_tlb_flush(genvaddr_t vaddr)
{
    // TODO: figure out selective flushing for ARMv8
    sysreg_invalidate_tlb();
}

static inline void do_selective_tlb_flush(genvaddr_t vaddr, genvaddr_t vend)
{
    // TODO: figure out selective flushing for ARMv8
    sysreg_invalidate_tlb();
}

static inline void do_full_tlb_flush(void)
{
    sysreg_invalidate_tlb();
}
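
// Typical usage (illustrative sketch; `l3e', `pa', `flags', and `va' are
// placeholders): after rewriting a leaf entry, flush the stale translation.
// Until selective flushing is implemented, every variant above performs a
// full TLB invalidation.
//
//     paging_set_l3_entry(l3e, pa, flags);
//     do_one_tlb_flush(va);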


#endif // KERNEL_ARCH_ARMv8_PAGING_H