/**
 * \file
 * \brief Architecture specific kernel page table definitions
 */

/*
 * Copyright (c) 2007, 2008, 2009, 2010, ETH Zurich.
 * All rights reserved.
 *
 * This file is distributed under the terms in the attached LICENSE file.
 * If you do not find this file, copies can be found by writing to:
 * ETH Zurich D-INFK, Haldeneggsteig 4, CH-8092 Zurich. Attn: Systems Group.
 */

#ifndef KERNEL_ARCH_X86_64_PAGING_H
#define KERNEL_ARCH_X86_64_PAGING_H

#include <target/x86_64/paging_kernel_target.h>
#include <paging_kernel_helper.h>

/** Physical memory page size is 2 MBytes */
#define X86_64_MEM_PAGE_SIZE            X86_64_LARGE_PAGE_SIZE

/** Mask for the offset within a physical memory page */
#define X86_64_MEM_PAGE_MASK            0x1fffff
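
/*
 * Illustrative example (not used by the kernel): with 2 MByte pages, the
 * offset of a physical address within its page and the page's base address
 * can be derived as
 *
 *   offset = paddr & X86_64_MEM_PAGE_MASK;
 *   base   = paddr & ~(lpaddr_t)X86_64_MEM_PAGE_MASK;
 */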

/**
 * Resolves to the number of page directory pointer table entries required to
 * map 'limit' bytes.
 */
#define X86_64_PDPT_ENTRIES(limit)     (X86_64_PML4_BASE((limit) - 1) + 1)

/**
 * Resolves to the number of page directory entries required to map 'limit'
 * bytes.
 */
#define X86_64_PDIR_ENTRIES(limit)     (X86_64_PDPT_BASE((limit) - 1) + 1)

/**
 * Resolves to the number of page table entries required to map 'limit' bytes.
 */
#define X86_64_PTABLE_ENTRIES(limit)   (X86_64_PDIR_BASE((limit) - 1) + 1)
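
/*
 * Worked example (a sketch, assuming the usual x86-64 layout in which
 * X86_64_PDIR_BASE() selects address bits 21..29): for limit = 1 GByte,
 *
 *   X86_64_PTABLE_ENTRIES(1UL << 30)
 *     == X86_64_PDIR_BASE((1UL << 30) - 1) + 1
 *     == 511 + 1
 *     == 512
 */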

/**
 * \brief Switch context.
 *
 * Assigns the given physical base address to the CR3 register,
 * effectively switching context to the new address space. Be
 * careful to switch only to "good" (well-formed) page tables.
 *
 * \param addr  Physical base address of page table.
 */
static inline void paging_context_switch(lpaddr_t addr)
{
    paging_x86_64_context_switch(addr);
}

/**
 * \brief Map a device region of 'size' bytes at physical address 'base' into
 *        kernel virtual address space and return its kernel virtual address.
 */
static inline lvaddr_t paging_map_device(lpaddr_t base, size_t size)
{
    return paging_x86_64_map_device(base, size);
}

/** Return true if 'type' is the root page table type (x86-64 PML4). */
static inline bool is_root_pt(enum objtype type) {
    return type == ObjType_VNode_x86_64_pml4;
}

/** Return the size in bytes of a single page table entry. */
static inline size_t get_pte_size(void) {
    return sizeof(union x86_64_ptable_entry);
}

/** Invalidate the TLB entry for a single virtual address. */
static inline void do_one_tlb_flush(genvaddr_t vaddr)
{
    __asm__ __volatile__("invlpg %0" : : "m" (*(char *)vaddr));
}

/** Invalidate the TLB entry for every base page in [vaddr, vend). */
static inline void do_selective_tlb_flush(genvaddr_t vaddr, genvaddr_t vend)
{
    for (genvaddr_t addr = vaddr; addr < vend; addr += X86_64_BASE_PAGE_SIZE) {
        __asm__ __volatile__("invlpg %0" : : "m" (*(char *)addr));
    }
}

/** Flush the entire TLB by reloading CR3. */
static inline void do_full_tlb_flush(void) {
    // XXX: FIXME: Going to reload cr3 to flush the entire TLB.
    // This is inefficient.
    // The current implementation is also not multicore safe.
    // We should only invalidate the affected entry using invlpg
    // and figure out which remote TLBs to flush.
    uint64_t cr3;
    __asm__ __volatile__("mov %%cr3,%0" : "=a" (cr3) : );
    __asm__ __volatile__("mov %0,%%cr3" :  : "a" (cr3));
}
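
/*
 * Illustrative sketch (hypothetical, not part of the kernel interface):
 * a caller that has just changed mappings for [vaddr, vend) could invalidate
 * only the affected entries, falling back to a full flush for large ranges
 * where per-page invlpg would be slower. The 32-page threshold below is an
 * arbitrary illustrative choice, not a value taken from this codebase.
 */
static inline void example_tlb_flush_range(genvaddr_t vaddr, genvaddr_t vend)
{
    if (vend - vaddr <= 32 * X86_64_BASE_PAGE_SIZE) {
        do_selective_tlb_flush(vaddr, vend);
    } else {
        do_full_tlb_flush();
    }
}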

#endif // KERNEL_ARCH_X86_64_PAGING_H