/**
 * \file
 * \brief x86-64 kernel page-table setup
 */

/*
 * Copyright (c) 2007, 2008, 2009, 2010, ETH Zurich.
 * All rights reserved.
 *
 * This file is distributed under the terms in the attached LICENSE file.
 * If you do not find this file, copies can be found by writing to:
 * ETH Zurich D-INFK, Haldeneggsteig 4, CH-8092 Zurich. Attn: Systems Group.
 */

#include <kernel.h>
#include <paging_kernel_arch.h>

#ifdef __k1om__
#include <xeon_phi.h>
#define PADDR_SPACE_LIMIT K1OM_PADDR_SPACE_LIMIT
#define PTABLE_GLOBAL_PAGE_BIT 0
#define MEMORY_OFFSET K1OM_MEMORY_OFFSET
#define KERNEL_INIT_MEMORY K1OM_KERNEL_INIT_MEMORY
#else
#define PADDR_SPACE_LIMIT X86_64_PADDR_SPACE_LIMIT
#define PTABLE_GLOBAL_PAGE_BIT X86_64_PTABLE_GLOBAL_PAGE
#define MEMORY_OFFSET X86_64_MEMORY_OFFSET
#define KERNEL_INIT_MEMORY X86_64_KERNEL_INIT_MEMORY
#endif

/*
 * Table requirements for various address spaces.
 */
#define MEM_PDPT_SIZE           X86_64_PDPT_ENTRIES(PADDR_SPACE_LIMIT)
#define MEM_PDIR_SIZE           X86_64_PDIR_ENTRIES(PADDR_SPACE_LIMIT)

/*
 * Page attribute bitmaps for various address spaces.
 */
#define MEM_PAGE_BITMAP                                 \
    (X86_64_PTABLE_PRESENT | X86_64_PTABLE_READ_WRITE | \
     PTABLE_GLOBAL_PAGE_BIT)
#define DEVICE_PAGE_BITMAP                                      \
    (X86_64_PTABLE_PRESENT | X86_64_PTABLE_READ_WRITE |         \
     X86_64_PTABLE_CACHE_DISABLED | PTABLE_GLOBAL_PAGE_BIT)
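
/*
 * Note: DEVICE_PAGE_BITMAP differs from MEM_PAGE_BITMAP only in that it also
 * sets X86_64_PTABLE_CACHE_DISABLED, so device (MMIO) mappings are never
 * cached.
 */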

/**
 * Kernel page map level 4 table.
 */
static union x86_64_pdir_entry pml4[X86_64_PTABLE_SIZE]
__attribute__((aligned(X86_64_BASE_PAGE_SIZE)));

/**
 * Page directory pointer table for physical memory address space.
 */
static union x86_64_pdir_entry mem_pdpt[MEM_PDPT_SIZE][X86_64_PTABLE_SIZE]
__attribute__((aligned(X86_64_BASE_PAGE_SIZE)));

/**
 * Page directory for physical memory address space.
 */
static union x86_64_ptable_entry mem_pdir[MEM_PDPT_SIZE][MEM_PDIR_SIZE][X86_64_PTABLE_SIZE]
__attribute__((aligned(X86_64_BASE_PAGE_SIZE)));

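/**
 * \brief Hook up one 2 MB mapping in the kernel page hierarchy.
 *
 * Links the given PML4 entry to its PDPT and the PDPT entry to its page
 * directory (if not already present), then installs a large-page mapping
 * for physical address 'addr' with the flags given in 'bitmap'. An already
 * existing mapping is overwritten.
 */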
static inline void mapit(union x86_64_pdir_entry *pml4_base,
                         union x86_64_pdir_entry *pdpt_base,
                         union x86_64_ptable_entry *pdir_base, lpaddr_t addr,
                         uint64_t bitmap)
{
    if(!X86_64_IS_PRESENT(pml4_base)) {
        paging_x86_64_map_table(pml4_base,
                                mem_to_local_phys((lvaddr_t)pdpt_base));
    }

    if(!X86_64_IS_PRESENT(pdpt_base)) {
        paging_x86_64_map_table(pdpt_base,
                                mem_to_local_phys((lvaddr_t)pdir_base));
    }

    if(!X86_64_IS_PRESENT(pdir_base)) {
        debug(SUBSYS_PAGING, "mapped!\n");
        paging_x86_64_map_large(pdir_base, addr, bitmap);
    } else {
        // Remap the page anyway; this is important for the memory latency benchmark
        debug(SUBSYS_PAGING, "already existing! remapping it\n");
        paging_x86_64_map_large(pdir_base, addr, bitmap);
    }
}

/**
 * \brief Map a region of physical memory into the physical memory address space.
 *
 * Maps the region of physical memory, starting at 'base' and 'size' bytes
 * long, to the same-sized virtual memory region in the kernel's physical
 * memory window. All pages are flagged according to 'bitmap'. This function
 * automatically fills in the needed page directory entries in the page
 * hierarchy rooted at pml4. 'base' and 'size' are aligned to
 * X86_64_MEM_PAGE_SIZE by this function.
 *
 * \param base          Physical base address of memory region
 * \param size          Size in bytes of memory region
 * \param bitmap        Bitmap of flags for page tables/directories
 *
 * \return 0 on success, -1 on error (out of range)
 */
static int paging_map_mem(lpaddr_t base, size_t size, uint64_t bitmap)
{
    lvaddr_t vaddr, vbase = local_phys_to_mem(base);
    lpaddr_t addr;

    // Align given physical base address
    if(base & X86_64_MEM_PAGE_MASK) {
        base -= base & X86_64_MEM_PAGE_MASK;
    }

    paging_align(&vbase, &base, &size, X86_64_MEM_PAGE_SIZE);

    // Is mapped region out of range?
    assert(base + size <= (lpaddr_t)PADDR_SPACE_LIMIT);
    if(base + size > (lpaddr_t)PADDR_SPACE_LIMIT) {
        return -1;
    }

    // Map pages, tables and directories
    for(vaddr = vbase, addr = base; vaddr < vbase + size;
        vaddr += X86_64_MEM_PAGE_SIZE, addr += X86_64_MEM_PAGE_SIZE) {
        union x86_64_pdir_entry *pml4_base =
            &pml4[X86_64_PML4_BASE(vaddr)],
            *pdpt_base = &mem_pdpt[X86_64_PML4_BASE(addr)][X86_64_PDPT_BASE(vaddr)];
        union x86_64_ptable_entry *pdir_base =
            &mem_pdir[X86_64_PML4_BASE(addr)][X86_64_PDPT_BASE(addr)][X86_64_PDIR_BASE(vaddr)];

        debug(SUBSYS_PAGING, "Mapping 2M page: vaddr = 0x%"PRIxLVADDR", addr = 0x%lx, "
              "PML4_BASE = %lu, PDPT_BASE = %lu, PDIR_BASE = %lu -- ", vaddr,
              addr, X86_64_PML4_BASE(vaddr), X86_64_PDPT_BASE(vaddr),
              X86_64_PDIR_BASE(vaddr));

        mapit(pml4_base, pdpt_base, pdir_base, addr, bitmap);
    }
    // XXX FIXME: get rid of this TLB flush code, or move it elsewhere
    // uint64_t cr3;
    // __asm__ __volatile__("mov %%cr3,%0" : "=a" (cr3) : );
    // __asm__ __volatile__("mov %0,%%cr3" :  : "a" (cr3));

    return 0;
}

lvaddr_t paging_x86_64_map_device(lpaddr_t base, size_t size)
{
    if(paging_map_mem(base, size, DEVICE_PAGE_BITMAP) == 0) {
        return local_phys_to_mem(base);
    } else {
        return 0;
    }
}
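
/*
 * Example (hypothetical caller, not part of this file): map a device's MMIO
 * region and access a 32-bit register through the returned kernel-virtual
 * address. 'dev_base', 'dev_size' and 'STATUS_OFFSET' are illustrative names.
 *
 *   lvaddr_t regs = paging_x86_64_map_device(dev_base, dev_size);
 *   if (regs == 0) {
 *       panic("failed to map device registers");
 *   }
 *   volatile uint32_t *status = (volatile uint32_t *)(regs + STATUS_OFFSET);
 *   uint32_t val = *status;
 */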

int paging_x86_64_map_memory(lpaddr_t base, size_t size)
{
    return paging_map_mem(base, size, MEM_PAGE_BITMAP);
}

/**
 * \brief Reset kernel paging.
 *
 * This function installs the kernel's pristine page tables, mapping the
 * kernel image and an initial region of physical memory, and thereby
 * discards all other mappings. Use this only at system bootup!
 */
void paging_x86_64_reset(void)
{
    // Map kernel image so we don't lose ground
    if(paging_x86_64_map_memory(mem_to_local_phys((lvaddr_t)&_start_kernel),
                                SIZE_KERNEL_IMAGE) != 0) {
        panic("error while mapping physical memory!");
    }

    // Map an initial amount of memory
    if(paging_x86_64_map_memory(0, KERNEL_INIT_MEMORY) != 0) {
        panic("error while mapping physical memory!");
    }

#ifdef __k1om__
    /* mapping the Xeon Phi SBOX registers to provide serial input */
    if (paging_x86_64_map_memory(XEON_PHI_SBOX_BASE, XEON_PHI_SBOX_SIZE) != 0) {
        panic("error while mapping physical memory!");
    }
#endif

    // Switch to new page layout
    paging_x86_64_context_switch(mem_to_local_phys((lvaddr_t)pml4));
}
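
/*
 * Example (hypothetical boot path, not part of this file): architecture
 * startup code is expected to call this once, early in kernel initialization:
 *
 *   paging_x86_64_reset();
 *   // From here on, local_phys_to_mem() addresses are valid for the kernel
 *   // image and for the first KERNEL_INIT_MEMORY bytes of physical memory.
 */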

/**
 * \brief Make a "good" PML4 table out of a page table.
 *
 * A "good" PML4 table is one that has all of physical address space and
 * the kernel mapped in. This function modifies the passed PML4, based
 * at physical address 'base', accordingly. It does this by copying in the
 * corresponding entries from the kernel's pristine PML4 table.
 *
 * \param base  Physical base address of PML4 table to make "good".
 */
void paging_x86_64_make_good_pml4(lpaddr_t base)
{
    union x86_64_pdir_entry *newpml4 =
        (union x86_64_pdir_entry *)local_phys_to_mem(base);
    int                 i;

    // XXX: Disabled till vaddr_t is figured out
    debug(SUBSYS_PAGING, "Is now a PML4: table = 0x%"PRIxLPADDR"\n", base);

    // Map memory
    for(i = X86_64_PML4_BASE(MEMORY_OFFSET); i < X86_64_PTABLE_SIZE; i++) {
        newpml4[i] = pml4[i];
    }
}
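
/*
 * Example (hypothetical caller, not part of this file): after allocating and
 * zeroing a fresh page for a new address space's PML4 at physical address
 * 'new_pml4_base', pull in the kernel mappings before switching to it:
 *
 *   paging_x86_64_make_good_pml4(new_pml4_base);
 *   paging_x86_64_context_switch(new_pml4_base);
 */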