/**
 * \file
 * \brief x86-32 kernel page-table setup
 */

/*
 * Copyright (c) 2007-2013 ETH Zurich.
 * All rights reserved.
 *
 * This file is distributed under the terms in the attached LICENSE file.
 * If you do not find this file, copies can be found by writing to:
 * ETH Zurich D-INFK, Universitaetstr. 6, CH-8092 Zurich. Attn: Systems Group.
 */

#include <kernel.h>
#include <paging_kernel_arch.h>
#include <dispatch.h>

/*
 * Page attribute bitmaps for various address spaces.
 */
#define MEM_PAGE_BITMAP                                 \
    (X86_32_PTABLE_PRESENT | X86_32_PTABLE_READ_WRITE | \
     X86_32_PTABLE_GLOBAL_PAGE)
#define DEVICE_PAGE_BITMAP                              \
    (X86_32_PTABLE_PRESENT | X86_32_PTABLE_READ_WRITE | \
     X86_32_PTABLE_CACHE_DISABLED | X86_32_PTABLE_GLOBAL_PAGE)

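/*
 * Three paging configurations are supported below: PAE (2MB superpages
 * through a PDPTE and per-GB page directories), PSE (4MB superpages through
 * a single page directory), and plain two-level paging with 4KB pages.
 */
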
#ifdef CONFIG_PAE

/*
 * Table requirements for various address spaces.
 */
#define MEM_PDIR_SIZE           X86_32_PDIR_ENTRIES(X86_32_PADDR_SPACE_LIMIT)

/**
 * Kernel PDPTE table.
 */
static union x86_32_pdpte_entry pdpte[X86_32_PDPTE_SIZE]
__attribute__((aligned(X86_32_BASE_PAGE_SIZE)));

/**
 * Page directory for physical memory address space.
 */
static union x86_32_ptable_entry mem_pdir[MEM_PDIR_SIZE][X86_32_PTABLE_SIZE]
__attribute__((aligned(X86_32_BASE_PAGE_SIZE)));

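/**
 * \brief Map one 2MB superpage, based at physical address 'addr'.
 *
 * Hooks the kernel's page directory into the given PDPTE entry if it is not
 * present yet, then (re)writes the large-page mapping with the page
 * attributes in 'bitmap'.
 */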
static inline void mapit(union x86_32_pdpte_entry *pdpte_base,
                         union x86_32_ptable_entry *pdir_base, lpaddr_t addr,
                         uint64_t bitmap)
{
    if(!X86_32_IS_PRESENT(pdpte_base)) {
        paging_x86_32_map_pdpte(pdpte_base,
                                mem_to_local_phys((lvaddr_t)pdir_base));
    }

    if(!X86_32_IS_PRESENT(pdir_base)) {
        debug(SUBSYS_PAGING, "mapped!\n");
    } else {
        // Remap the page anyway; this matters for the memory latency benchmark
        debug(SUBSYS_PAGING, "already mapped, remapping\n");
    }

    paging_x86_32_map_large(pdir_base, addr, bitmap);
}

#else

#       ifdef CONFIG_PSE

/**
 * Page directory for physical memory address space.
 */
static union x86_32_ptable_entry pdir[X86_32_PTABLE_SIZE]
__attribute__((aligned(X86_32_BASE_PAGE_SIZE)));

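/**
 * \brief Map one 4MB superpage, based at physical address 'addr'.
 *
 * With PSE, the page directory entry maps the superpage directly, so the
 * mapping is simply (re)written with the page attributes in 'bitmap'.
 */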
static inline void mapit(union x86_32_ptable_entry *pdir_base, lpaddr_t addr,
                         uint64_t bitmap)
{
    if(!X86_32_IS_PRESENT(pdir_base)) {
        debug(SUBSYS_PAGING, "mapped!\n");
    } else {
        // Remap the page anyway; this matters for the memory latency benchmark
        debug(SUBSYS_PAGING, "already mapped, remapping\n");
    }

    paging_x86_32_map_large(pdir_base, addr, bitmap);
}

#       else

/**
 * Page directory for physical memory address space.
 */
static union x86_32_pdir_entry pdir[X86_32_PTABLE_SIZE]
__attribute__((aligned(X86_32_BASE_PAGE_SIZE)));

/**
 * Page table for physical memory address space.
 */
static union x86_32_ptable_entry mem_ptable[MEM_PTABLE_SIZE][X86_32_PTABLE_SIZE]
__attribute__((aligned(X86_32_BASE_PAGE_SIZE)));

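/**
 * \brief Map one 4KB page, based at physical address 'addr'.
 *
 * Hooks the backing page table into the given page directory entry if it is
 * not present yet, then (re)writes the page mapping with the page attributes
 * in 'bitmap'.
 */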
static inline void mapit(union x86_32_pdir_entry *pdir_base,
                         union x86_32_ptable_entry *ptable_base,
                         lpaddr_t addr, uint64_t bitmap)
{
    if(!X86_32_IS_PRESENT(pdir_base)) {
        paging_x86_32_map_table(pdir_base,
                                mem_to_local_phys((lvaddr_t)ptable_base));
    }

    if(!X86_32_IS_PRESENT(ptable_base)) {
        debug(SUBSYS_PAGING, "mapped!\n");
    } else {
        // Remap the page anyway; this matters for the memory latency benchmark
        debug(SUBSYS_PAGING, "already mapped, remapping\n");
    }

    paging_x86_32_map(ptable_base, addr, bitmap);
}

#       endif

#endif

/**
 * \brief Map a region of physical memory into physical memory address space.
 *
 * Maps the region of physical memory, based at base and sized size bytes
 * to the same-sized virtual memory region. All pages are flagged according to
 * bitmap. This function automatically fills the needed page directory entries
 * in the kernel's page hierarchy. base and size will be made page-aligned by
 * this function.
 *
 * \param base          Base address of memory region
 * \param size          Size in bytes of memory region
 * \param bitmap        Bitmap of flags for page tables/directories
 *
 * \return 0 on success, -1 on error (out of range)
 */
static int paging_x86_32_map_mem(lpaddr_t base, size_t size, uint64_t bitmap)
{
    lvaddr_t    vaddr, vbase = local_phys_to_mem(base);
    lpaddr_t    addr;

    paging_align(&vbase, &base, &size, X86_32_MEM_PAGE_SIZE);

    // Is mapped region out of range?
    assert(local_phys_to_gen_phys(base + size) <= X86_32_PADDR_SPACE_LIMIT);
    if(local_phys_to_gen_phys(base + size) > X86_32_PADDR_SPACE_LIMIT) {
        printk(LOG_ERR, "Mapped region [%"PRIxLPADDR",%"PRIxLPADDR"] "
                        "out of physical address range!",
               base, base + size);
        return -1;
    }

    assert(local_phys_to_gen_phys(vbase + size) <= X86_32_VADDR_SPACE_SIZE);

    // Map pages, tables and directories
    for(vaddr = vbase, addr = base;;
        vaddr += X86_32_MEM_PAGE_SIZE, addr += X86_32_MEM_PAGE_SIZE) {
#ifdef CONFIG_PAE
        union x86_32_pdpte_entry *pdpte_base = &pdpte[X86_32_PDPTE_BASE(vaddr)];
        union x86_32_ptable_entry *pdir_base =
            &mem_pdir[X86_32_PDPTE_BASE(addr)][X86_32_PDIR_BASE(vaddr)];
#else
        union x86_32_pdir_entry *pdir_base =
            (union x86_32_pdir_entry *)&pdir[X86_32_PDIR_BASE(vaddr)];
#       ifndef CONFIG_PSE
        union x86_32_ptable_entry *ptable_base =
            &mem_ptable[X86_32_PDIR_BASE(addr)][X86_32_PTABLE_BASE(vaddr)];
#       endif
#endif

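        // If vbase + size wraps around to zero, the mapped region ends at
        // the very top of the address space, so the bound check below would
        // never fire; that case terminates via the check at the end of the
        // loop body instead.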
        if(vbase + size != 0) {
            if(vaddr >= vbase + size) {
                break;
            }
        }

#ifdef CONFIG_PAE
        debug(SUBSYS_PAGING, "Mapping 2M page: vaddr = 0x%"PRIxLVADDR", addr = 0x%"PRIxLPADDR", "
              "PDPTE_BASE = %"PRIuLPADDR", PDIR_BASE = %"PRIuLPADDR" -- ", vaddr,
              addr, X86_32_PDPTE_BASE(vaddr), X86_32_PDIR_BASE(vaddr));
        mapit(pdpte_base, pdir_base, addr, bitmap);
#else
#       ifdef CONFIG_PSE
        debug(SUBSYS_PAGING, "Mapping 4M page: vaddr = 0x%"PRIxLVADDR", addr = 0x%"PRIxLPADDR", "
              "PDIR_BASE = %"PRIuLPADDR" -- ", vaddr,
              addr, X86_32_PDIR_BASE(vaddr));
        mapit((union x86_32_ptable_entry *)pdir_base, addr, bitmap);
#       else
        debug(SUBSYS_PAGING, "Mapping 4K page: vaddr = 0x%"PRIxLVADDR", "
              "addr = 0x%"PRIxLPADDR", "
              "PDIR_BASE = %"PRIuLPADDR", PTABLE_BASE = %"PRIuLPADDR" -- ", vaddr,
              addr, X86_32_PDIR_BASE(vaddr), X86_32_PTABLE_BASE(vaddr));
        mapit(pdir_base, ptable_base, addr, bitmap);
#       endif
#endif

        if(vbase + size == 0) {
            // Bail out after mapping the last page of the address space,
            // to keep vaddr from wrapping around to zero
            if(vaddr == (lvaddr_t)0 - X86_32_MEM_PAGE_SIZE) {
                break;
            }
        }
    }

    return 0;
}

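/**
 * \brief Map a region of physical memory into kernel virtual address space.
 *
 * Allocates a same-sized virtual region, growing downwards from the top of
 * the address space, and maps the physical region based at base into it.
 * All pages are flagged according to bitmap. base and size will be made
 * page-aligned by this function.
 *
 * \param base          Base address of memory region
 * \param size          Size in bytes of memory region
 * \param bitmap        Bitmap of flags for page tables/directories
 *
 * \return Kernel virtual address of the mapped region, or 0 if the device
 *         address space is exhausted.
 */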
lvaddr_t paging_x86_32_map_special(lpaddr_t base, size_t size, uint64_t bitmap)
{
    // Allocate backwards from a page below end of address space
    static lvaddr_t vbase = (lvaddr_t)X86_32_VADDR_SPACE_SIZE;
    lpaddr_t addr;
    lvaddr_t vaddr;

    paging_align(&vbase, &base, &size, X86_32_MEM_PAGE_SIZE);

    // Align physical base address
    lpaddr_t offset = base & (X86_32_MEM_PAGE_SIZE - 1);
    base -= offset;

    if(vbase - size < X86_32_VADDR_SPACE_SIZE - X86_32_DEVICE_SPACE_LIMIT) {
        return 0;
    }

    // Map pages, tables and directories (reverse order)
    for(vaddr = vbase - X86_32_MEM_PAGE_SIZE,
            addr = base + size - X86_32_MEM_PAGE_SIZE;
        vaddr >= vbase - size;
        vaddr -= X86_32_MEM_PAGE_SIZE, addr -= X86_32_MEM_PAGE_SIZE) {
#ifdef CONFIG_PAE
        union x86_32_pdpte_entry *pdpte_base = &pdpte[X86_32_PDPTE_BASE(vaddr)];
        union x86_32_ptable_entry *pdir_base =
            &mem_pdir[X86_32_PDPTE_BASE(mem_to_local_phys(vaddr))][X86_32_PDIR_BASE(vaddr)];

        debug(SUBSYS_PAGING, "Mapping 2M device page: vaddr = 0x%"PRIxLVADDR", addr = 0x%"PRIxLPADDR", "
              "PDPTE_BASE = %"PRIxLPADDR", PDIR_BASE = %"PRIxLPADDR" -- ", vaddr,
              addr, X86_32_PDPTE_BASE(vaddr), X86_32_PDIR_BASE(vaddr));
        mapit(pdpte_base, pdir_base, addr, bitmap);
#else
#       ifdef CONFIG_PSE
        union x86_32_ptable_entry *pdir_base = &pdir[X86_32_PDIR_BASE(vaddr)];

        debug(SUBSYS_PAGING, "Mapping 4M device page: vaddr = 0x%"PRIxLVADDR", addr = 0x%"PRIxLPADDR", "
              "PDIR_BASE = %"PRIxLPADDR" -- ", vaddr, addr, X86_32_PDIR_BASE(vaddr));
        mapit(pdir_base, addr, bitmap);
#       else
        union x86_32_pdir_entry *pdir_base = &pdir[X86_32_PDIR_BASE(vaddr)];
        union x86_32_ptable_entry *ptable_base =
            &mem_ptable[X86_32_PDIR_BASE(vaddr) - (X86_32_PTABLE_SIZE - MEM_PTABLE_SIZE)][X86_32_PTABLE_BASE(vaddr)];

        debug(SUBSYS_PAGING, "Mapping 4K device page: vaddr = 0x%"PRIxLVADDR", "
              "addr = 0x%"PRIxLPADDR", "
              "PDIR_BASE = %"PRIxLPADDR", PTABLE_BASE = %"PRIxLPADDR", pdir = %p, ptable = %p -- ",
              vaddr, addr, X86_32_PDIR_BASE(vaddr), X86_32_PTABLE_BASE(vaddr), pdir,
              mem_ptable[X86_32_PDIR_BASE(vaddr) - (X86_32_PTABLE_SIZE - MEM_PTABLE_SIZE)]);
        mapit(pdir_base, ptable_base, addr, bitmap);
#       endif
#endif
    }

    vbase -= size;
    return vbase + offset;
}

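/**
 * \brief Map a device into the kernel's address space.
 *
 * Maps the device region based at physical address base, of size bytes,
 * with caching disabled.
 *
 * As an illustrative (hypothetical) example, mapping the local APIC's MMIO
 * page, which the x86 architecture places at 0xfee00000 by default, might
 * look like:
 *
 *     lvaddr_t lapic_base = paging_x86_32_map_device(0xfee00000,
 *                                                    X86_32_BASE_PAGE_SIZE);
 *
 * The mapping is expanded to X86_32_MEM_PAGE_SIZE granularity internally.
 *
 * \param base          Physical base address of device region
 * \param size          Size in bytes of device region
 *
 * \return Kernel virtual address of the mapped region, or 0 on failure.
 */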
lvaddr_t paging_x86_32_map_device(lpaddr_t base, size_t size)
{
    return paging_x86_32_map_special(base, size, DEVICE_PAGE_BITMAP);
}

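/**
 * \brief Map a region of RAM into the physical memory address space.
 *
 * \param base          Base address of memory region
 * \param size          Size in bytes of memory region
 *
 * \return 0 on success, -1 on error (out of range)
 */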
int paging_x86_32_map_memory(lpaddr_t base, size_t size)
{
    return paging_x86_32_map_mem(base, size, MEM_PAGE_BITMAP);
}

/**
 * \brief Reset kernel paging.
 *
 * This function resets the page maps for the kernel and memory space. It
 * clears out all other mappings. Use this only at system bootup!
 */
void paging_x86_32_reset(void)
{
    // Re-map physical memory
    // XXX: Map in what we get from Multiboot. We should actually map
    // stuff dynamically, whenever raw mem gets retyped into a kernel
    // object
/*     if(paging_map_memory(0, multiboot_info->mem_upper * 1024 + 0x100000) */
    lpaddr_t lpaddr = gen_phys_to_local_phys(X86_32_PADDR_SPACE_LIMIT -
                                             X86_32_DEVICE_SPACE_LIMIT);
    if(paging_x86_32_map_memory(0, lpaddr) != 0) {
        panic("error while mapping physical memory!");
    }

    // Switch to new page layout
#ifdef CONFIG_PAE
    paging_x86_32_context_switch(mem_to_local_phys((lvaddr_t)pdpte));
#else
    paging_x86_32_context_switch(mem_to_local_phys((lvaddr_t)pdir));
#endif
}

#ifdef CONFIG_PAE
/**
 * \brief Make a "good" PDPTE table out of a page table.
 *
 * A "good" PDPTE table is one that has all physical address space and
 * the kernel mapped in. This function modifies the passed PDPTE, based
 * at physical address 'base' accordingly. It does this by taking out
 * the corresponding entries of the kernel's pristine PDPTE table.
 *
 * \param base  Physical base address of PDPTE table to make "good".
 */
void paging_x86_32_make_good_pdpte(lpaddr_t base)
{
    union x86_32_pdpte_entry   *newpdpte =
        (union x86_32_pdpte_entry *)local_phys_to_mem(base);
    int                 i;

    debug(SUBSYS_PAGING, "Is now a PDPTE: table = 0x%"PRIxLPADDR"\n", base);
    // Map memory
    for(i = X86_32_PDPTE_BASE(X86_32_MEMORY_OFFSET); i < X86_32_PDPTE_SIZE; i++) {
        newpdpte[i] = pdpte[i];
    }
}
#else
/**
 * \brief Make a "good" PDE table out of a page table.
 *
 * A "good" PDE table is one that has all physical address space and
 * the kernel mapped in. This function modifies the passed PDE, based
 * at physical address 'base' accordingly. It does this by taking out
 * the corresponding entries of the kernel's pristine PDE table.
 *
 * \param base  Physical base address of PDE table to make "good".
 */
void paging_x86_32_make_good_pdir(lpaddr_t base)
{
#ifdef CONFIG_PSE
    union x86_32_ptable_entry  *newpdir =
        (union x86_32_ptable_entry *)local_phys_to_mem(base);
#else
    union x86_32_pdir_entry  *newpdir =
        (union x86_32_pdir_entry *)local_phys_to_mem(base);
#endif
    int                 i;

    debug(SUBSYS_PAGING, "Is now a PDE: table = 0x%" PRIxLPADDR "\n", base);

    // Map memory
    for(i = X86_32_PDIR_BASE(X86_32_MEMORY_OFFSET); i < X86_32_PDIR_SIZE; i++) {
        newpdir[i] = pdir[i];
    }
}
#endif