/*
 * Copyright 2017, Data61
 * Commonwealth Scientific and Industrial Research Organisation (CSIRO)
 * ABN 41 687 119 230.
 *
 * This software may be distributed and modified according to the terms of
 * the GNU General Public License version 2. Note that NO WARRANTY is provided.
 * See "LICENSE_GPLv2.txt" for details.
 *
 * @TAG(DATA61_GPL)
 */

#ifndef __ARCH_MODE_MACHINE_H_
#define __ARCH_MODE_MACHINE_H_

#include <config.h>
#include <arch/model/statedata.h>
#include <arch/machine/cpu_registers.h>
#include <arch/model/smp.h>
#include <plat_mode/machine/hardware.h>

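/* Construct a cr3_t from the physical address of a top level paging structure
 * (PML4) and a PCID. The architectural CR3 layout holds the 4KiB-aligned PML4
 * address in the upper bits and, when CR4.PCIDE is set, a 12-bit PCID in bits
 * 11:0; when CONFIG_SUPPORT_PCID is disabled the PCID field is forced to 0. */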
static inline cr3_t makeCR3(paddr_t addr, word_t pcid)
{
    return cr3_new(addr, config_set(CONFIG_SUPPORT_PCID) ? pcid : 0);
}

/* Address space control */
static inline cr3_t getCurrentCR3(void)
{
#ifdef CONFIG_KERNEL_SKIM_WINDOW
    /* Under the SKIM window the kernel runs off its own PML4, so if we are
     * executing kernel code to call this function then, by definition, the
     * kernel PML4 must be the current cr3 */
    return cr3_new(kpptr_to_paddr(x64KSKernelPML4), 0);
#else
    return MODE_NODE_STATE(x64KSCurrentCR3);
#endif
}

static inline cr3_t getCurrentUserCR3(void)
{
#ifdef CONFIG_KERNEL_SKIM_WINDOW
    /* Construct a cr3_t from the saved state word. setCurrentUserCR3 may have
     * stashed the preserve-translation command (bit 63) in that word, so mask
     * it off before handing the value back as a plain cr3 */
    word_t cr3_word = MODE_NODE_STATE(x64KSCurrentUserCR3);
    cr3_t cr3_ret;
    if (config_set(CONFIG_SUPPORT_PCID)) {
        cr3_word &= ~BIT(63);
    }
    cr3_ret.words[0] = cr3_word;
    return cr3_ret;
#else
    return getCurrentCR3();
#endif
}

static inline paddr_t getCurrentUserVSpaceRoot(void)
{
    return cr3_get_pml4_base_address(getCurrentUserCR3());
}

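/* Load a new cr3 value. When PCIDs are in use, bit 63 of the value written to
 * CR3 is the architectural "preserve translation" flag: if it is set, the TLB
 * entries tagged with the target PCID are kept rather than flushed. The
 * preserve_translation argument selects whether that flag is applied. */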
static inline void setCurrentCR3(cr3_t cr3, word_t preserve_translation)
{
#ifdef CONFIG_KERNEL_SKIM_WINDOW
    /* Under the SKIM window we should only ever be loading the kernel PML4
     * here (i.e. enabling the kernel window), as the bulk of the cr3 loading
     * happens in the assembly stubs on kernel entry/exit */
    assert(cr3_get_pml4_base_address(cr3) == kpptr_to_paddr(x64KSKernelPML4));
#else
    MODE_NODE_STATE(x64KSCurrentCR3) = cr3;
#endif
    word_t cr3_word = cr3.words[0];
    if (config_set(CONFIG_SUPPORT_PCID)) {
        if (preserve_translation) {
            cr3_word |= BIT(63);
        }
    } else {
        assert(cr3_get_pcid(cr3) == 0);
    }
    write_cr3(cr3_word);
}

/* There is no option for preserving translation when setting the user cr3, as
 * it is assumed that you want it preserved because you are performing a context
 * switch. If translation needs to be flushed then setCurrentCR3 should be used
 * instead. */
static inline void setCurrentUserCR3(cr3_t cr3)
{
#ifdef CONFIG_KERNEL_SKIM_WINDOW
    /* To make the restore stubs more efficient we set the preserve-translation
     * command (bit 63) directly in the saved state word. Any later reader of
     * this state must remember to mask that bit off (see getCurrentUserCR3) */
    word_t cr3_word = cr3.words[0];
    if (config_set(CONFIG_SUPPORT_PCID)) {
        cr3_word |= BIT(63);
    }
    MODE_NODE_STATE(x64KSCurrentUserCR3) = cr3_word;
#else
    setCurrentCR3(cr3, 1);
#endif
}

static inline void setCurrentVSpaceRoot(paddr_t addr, word_t pcid)
{
    setCurrentCR3(makeCR3(addr, pcid), 1);
}

static inline void setCurrentUserVSpaceRoot(paddr_t addr, word_t pcid)
{
#ifdef CONFIG_KERNEL_SKIM_WINDOW
    setCurrentUserCR3(makeCR3(addr, pcid));
#else
    setCurrentVSpaceRoot(addr, pcid);
#endif
}
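
/* Sketch of intended use (illustrative only, assuming a caller that has already
 * resolved the target vspace root to a physical address and chosen an ASID for
 * it):
 *
 *     setCurrentUserVSpaceRoot(pptr_to_paddr(vspace_root), asid);
 *
 * On SKIM-window kernels this merely records the user cr3 for the exit stubs to
 * load; otherwise it switches cr3 immediately, preserving translations. */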

/* GDT installation */
void x64_install_gdt(gdt_idt_ptr_t* gdt_idt_ptr);

/* IDT installation */
void x64_install_idt(gdt_idt_ptr_t* gdt_idt_ptr);

/* LDT installation */
void x64_install_ldt(uint32_t ldt_sel);

/* TSS installation */
void x64_install_tss(uint32_t tss_sel);

void handle_fastsyscall(void);

void init_syscall_msrs(void);

/* Get the current stack pointer, approximated by taking the address of a
 * local variable and copying it out via a register move */
static inline void* get_current_esp(void)
{
    word_t stack;
    void *result;
    asm volatile("movq %[stack_address], %[result]" : [result] "=r"(result) : [stack_address] "r"(&stack));
    return result;
}

typedef struct invpcid_desc {
    uint64_t    asid;
    uint64_t    addr;
} invpcid_desc_t;

#define INVPCID_TYPE_ADDR           0
#define INVPCID_TYPE_SINGLE         1
#define INVPCID_TYPE_ALL_GLOBAL     2   /* also invalidate global */
#define INVPCID_TYPE_ALL            3
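
/* invpcid_desc_t mirrors the 128-bit memory descriptor taken by the INVPCID
 * instruction: the low 12 bits of the first qword hold the PCID (named asid
 * here) and the second qword holds a linear address. The type values above
 * correspond to the architectural invalidation types: individual address (0),
 * single PCID context (1), all contexts including global translations (2),
 * and all contexts excluding global translations (3). */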

static inline void invalidateLocalPCID(word_t type, void *vaddr, asid_t asid)
{
    if (config_set(CONFIG_SUPPORT_PCID)) {
        invpcid_desc_t desc;
        desc.asid = asid & 0xfff;
        desc.addr = (uint64_t)vaddr;
        asm volatile("invpcid %1, %0" :: "r"(type), "m"(desc));
    } else {
        switch (type) {
        case INVPCID_TYPE_ADDR:
            asm volatile("invlpg (%[vptr])" :: [vptr] "r"(vaddr));
            break;
        case INVPCID_TYPE_SINGLE:
        case INVPCID_TYPE_ALL:
            /* reload CR3 to perform a full flush */
            setCurrentCR3(getCurrentCR3(), 0);
            break;
        case INVPCID_TYPE_ALL_GLOBAL: {
            /* clear and reset the PGE bit (CR4 bit 7) to flush global mappings */
            unsigned long cr4 = read_cr4();
            write_cr4(cr4 & ~BIT(7));
            write_cr4(cr4);
        }
        break;
        }
    }
}

static inline void invalidateLocalTranslationSingle(vptr_t vptr)
{
    /* As this may be used by the kernel to invalidate global mappings, and as
     * it is only used in boot code, we can just invalidate absolutely
     * everything from the TLB */
    invalidateLocalPCID(INVPCID_TYPE_ALL_GLOBAL, (void*)0, 0);
}

static inline void invalidateLocalTranslationSingleASID(vptr_t vptr, asid_t asid)
{
    invalidateLocalPCID(INVPCID_TYPE_ADDR, (void*)vptr, asid);
}

static inline void invalidateLocalTranslationAll(void)
{
    invalidateLocalPCID(INVPCID_TYPE_ALL_GLOBAL, (void*)0, 0);
}

static inline void invalidateLocalPageStructureCacheASID(paddr_t root, asid_t asid)
{
    if (config_set(CONFIG_SUPPORT_PCID)) {
        /* store our previous cr3 */
        cr3_t cr3 = getCurrentCR3();
        /* we load the new vspace root, invalidating translation for it,
         * and then switch back to the old CR3. We do this in a single
         * asm block to ensure we only rely on the code being mapped in
         * the temporary address space and not the stack. We preserve the
         * translation of the old cr3 */
        asm volatile(
            "mov %[new_cr3], %%cr3\n"
            "mov %[old_cr3], %%cr3\n"
            ::
            [new_cr3] "r" (makeCR3(root, asid).words[0]),
            [old_cr3] "r" (cr3.words[0] | BIT(63))
        );
    } else {
        /* just invalidate the page structure cache as per normal, by
         * doing a dummy invalidation of a tlb entry */
        asm volatile("invlpg (%[vptr])" :: [vptr] "r"(0));
    }
}

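/* swapgs exchanges the current GS base with the value held in the
 * IA32_KERNEL_GS_BASE MSR; it is typically used around kernel entry/exit to
 * switch between the user and kernel GS bases. */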
static inline void swapgs(void)
{
    asm volatile("swapgs");
}

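/* Read an MSR that may not exist, without faulting the kernel. The address of
 * the label following the rdmsr is stored in x86KSGPExceptReturnTo; the
 * expectation is that the #GP fault handler, on seeing a non-zero value there,
 * resumes execution at that label and clears the slot. If the slot still holds
 * the label address after the rdmsr, the read succeeded; if it has been zeroed,
 * a fault occurred and the returned value is not valid. */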
static inline rdmsr_safe_result_t x86_rdmsr_safe(const uint32_t reg)
{
    uint32_t low;
    uint32_t high;
    word_t returnto;
    word_t temp;
    rdmsr_safe_result_t result;
    asm volatile(
        "movabs $1f, %[temp]\n"
        "movq %[temp], (%[returnto_addr])\n"
        "rdmsr\n"
        "1:\n"
        "movq (%[returnto_addr]), %[returnto]\n"
        "movq $0, (%[returnto_addr])"
        : [returnto] "=&r" (returnto),
          [temp] "=&r" (temp),
          [high] "=&d" (high),
          [low] "=&a" (low)
        : [returnto_addr] "r" (&ARCH_NODE_STATE(x86KSGPExceptReturnTo)),
          [reg] "c" (reg)
        : "memory"
    );
    result.success = returnto != 0;
    result.value = ((uint64_t)high << 32) | (uint64_t)low;
    return result;
}

#ifdef CONFIG_FSGSBASE_INST

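/* These accessors use the RDFSBASE/WRFSBASE and RDGSBASE/WRGSBASE instructions,
 * which are only usable when the FSGSBASE feature is present and enabled via
 * CR4.FSGSBASE; otherwise the fs/gs bases must be accessed through the
 * IA32_FS_BASE and IA32_GS_BASE MSRs. */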
static inline void x86_write_fs_base_impl(word_t base)
{
    asm volatile("wrfsbase %0" :: "r"(base));
}

static inline void x86_write_gs_base_impl(word_t base)
{
    asm volatile("wrgsbase %0" :: "r"(base));
}

static inline word_t x86_read_fs_base_impl(void)
{
    word_t base = 0;
    asm volatile("rdfsbase %0" : "=r"(base));
    return base;
}

static inline word_t x86_read_gs_base_impl(void)
{
    word_t base = 0;
    asm volatile("rdgsbase %0" : "=r"(base));
    return base;
}

#endif /* CONFIG_FSGSBASE_INST */

#endif /* __ARCH_MODE_MACHINE_H_ */