1/**
2 * \file
3 * \brief Architecture specific kernel page table definitions
4 */
5
6/*
7 * Copyright (c) 2010, ETH Zurich.
8 * All rights reserved.
9 *
10 * This file is distributed under the terms in the attached LICENSE file.
11 * If you do not find this file, copies can be found by writing to:
12 * ETH Zurich D-INFK, Haldeneggsteig 4, CH-8092 Zurich. Attn: Systems Group.
13 */
14
15#ifndef KERNEL_ARCH_X86_32_PAGING_H
16#define KERNEL_ARCH_X86_32_PAGING_H
17
18#include <target/x86_32/paging_kernel_target.h>
19#include <paging_kernel_helper.h>
20
21#if defined(CONFIG_PAE) || defined(CONFIG_PSE)
22/** Physical memory page size is 2 MBytes */
23#       define X86_32_MEM_PAGE_SIZE           X86_32_LARGE_PAGE_SIZE
24#else
25/** Physical memory page size is 4 KBytes */
26#       define X86_32_MEM_PAGE_SIZE           X86_32_BASE_PAGE_SIZE
27
28#       define MEM_PTABLE_SIZE                X86_32_PTABLE_ENTRIES(X86_32_PADDR_SPACE_LIMIT)
29#endif
30
31/** Mask for physical memory page */
32#define X86_32_MEM_PAGE_MASK           (X86_32_MEM_PAGE_SIZE - 1)
33
34#ifdef CONFIG_PAE
35
36/**
37 * Resolves to required number of entries in page directory to map 'limit'
38 * number of bytes.
39 */
40#       define X86_32_PDIR_ENTRIES(limit)    (X86_32_PDPTE_BASE((limit) - 1) + 1)
41
42/**
43 * Resolves to required number of entries in page table to map 'limit' number
44 * of bytes.
45 */
46#       define X86_32_PTABLE_ENTRIES(limit)  (X86_32_PDIR_BASE((limit) - 1) + 1)
47
48#else
49
50/**
51 * Resolves to required number of entries in page directory to map 'limit'
52 * number of bytes.
53 */
54#       define X86_32_PDIR_ENTRIES(limit)      1
55
56/**
57 * Resolves to required number of entries in page table to map 'limit' number
58 * of bytes.
59 */
60#       define X86_32_PTABLE_ENTRIES(limit)  (X86_32_PDIR_BASE((limit) - 1) + 1)
61
62#endif
63
64/**
65 * \brief Switch context.
66 *
67 * Assigns given physical base address to the CR3 register,
68 * effectively switching context to new address space. Be
69 * cautious that you only switch to "good" page tables.
70 *
71 * \param addr  Physical base address of page table.
72 */
73static void inline paging_context_switch(lpaddr_t addr)
74{
75    paging_x86_32_context_switch(addr);
76}
77
78static lvaddr_t inline paging_map_device(lpaddr_t base, size_t size)
79{
80    return paging_x86_32_map_device(base, size);
81}
82
83static inline bool is_root_pt(enum objtype type) {
84#ifdef CONFIG_PAE
85    return type == ObjType_VNode_x86_32_pdpt;
86#else
87    return type == ObjType_VNode_x86_32_pdir;
88#endif
89}
90
91static inline size_t get_pte_size(void) {
92    // the definition of x86_32_ptable entry is wrapped in an #ifdef CONFIG_PAE
93    // block and will thus have the correct size for both PAE and non-PAE x86_32.
94    return sizeof(union x86_32_ptable_entry);
95}
96
97static inline void do_selective_tlb_flush(genvaddr_t vaddr, genvaddr_t vend)
98{
99    assert(vaddr < ((genvaddr_t)1)<<32);
100    assert(vend < ((genvaddr_t)1)<<32);
101    uint32_t vaddr32 = (uint32_t)vaddr;
102    uint32_t vend32 = (uint32_t)vend;
103
104    for (uint32_t addr = vaddr32; addr < vend32; addr += X86_32_BASE_PAGE_SIZE) {
105        __asm__ __volatile__("invlpg %0" : : "m" (*(char *)addr));
106    }
107}
108
109static inline void do_one_tlb_flush(genvaddr_t vaddr)
110{
111    assert(vaddr < ((genvaddr_t)1)<<32);
112    uint32_t addr = (uint32_t)vaddr;
113
114    __asm__ __volatile__("invlpg %0" : : "m" (*(char *)addr));
115}
116
/**
 * \brief Flush the entire TLB by reloading CR3.
 */
static inline void do_full_tlb_flush(void)
{
    // XXX: FIXME: Going to reload cr3 to flush the entire TLB.
    // This is inefficient.
    // The current implementation is also not multicore safe.
    // We should only invalidate the affected entry using invlpg
    // and figure out which remote tlbs to flush.
    uint32_t cr3;
    // Use the generic "r" constraint instead of pinning EAX ("a"):
    // the compiler is free to pick any register for the temporary.
    __asm__ __volatile__("mov %%cr3,%0" : "=r" (cr3) : );
    __asm__ __volatile__("mov %0,%%cr3" :  : "r" (cr3));
}
128
129#endif // KERNEL_ARCH_X86_32_PAGING_H
130