/*
 * Copyright 2014, General Dynamics C4 Systems
 *
 * This software may be distributed and modified according to the terms of
 * the GNU General Public License version 2. Note that NO WARRANTY is provided.
 * See "LICENSE_GPLv2.txt" for details.
 *
 * @TAG(GD_GPL)
 */

#include <config.h>
#include <api/syscall.h>
#include <machine/io.h>
#include <kernel/boot.h>
#include <model/statedata.h>
#include <arch/kernel/vspace.h>
#include <arch/kernel/boot.h>
#include <arch/api/invocation.h>
#include <benchmark/benchmark_track.h>
#include <arch/kernel/tlb_bitmap.h>
#include <mode/kernel/tlb.h>
/* 'gdt_idt_ptr' is declared globally because of a C-subset restriction.
 * It is only used in init_dtrs(), which is therefore non-reentrant.
 */
gdt_idt_ptr_t gdt_idt_ptr;

/* initialise the Task State Segment (TSS) */

BOOT_CODE void
init_tss(tss_t* tss)
{
    *tss = tss_new(
               sizeof(*tss),   /* io_map_base  */
               0,              /* trap         */
               SEL_NULL,       /* sel_ldt      */
               SEL_NULL,       /* gs           */
               SEL_NULL,       /* fs           */
               SEL_NULL,       /* ds           */
               SEL_NULL,       /* ss           */
               SEL_NULL,       /* cs           */
               SEL_NULL,       /* es           */
               0,              /* edi          */
               0,              /* esi          */
               0,              /* ebp          */
               0,              /* esp          */
               0,              /* ebx          */
               0,              /* edx          */
               0,              /* ecx          */
               0,              /* eax          */
               0,              /* eflags       */
               0,              /* eip          */
               0,              /* cr3          */
               SEL_NULL,       /* ss2          */
               0,              /* esp2         */
               SEL_NULL,       /* ss1          */
               0,              /* esp1         */
               SEL_DS_0,       /* ss0          */
               0,              /* esp0         */
               0               /* prev_task    */
           );
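
    /* Set every bit in the I/O permission bitmap: a set bit in the TSS I/O
     * bitmap denies access to that port, so userland starts with no direct
     * port I/O and must use kernel-mediated IOPort invocations instead. */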
    memset(&x86KSGlobalState[CURRENT_CPU_INDEX()].x86KStss.io_map[0], 0xff, sizeof(x86KSGlobalState[CURRENT_CPU_INDEX()].x86KStss.io_map));
}

/* initialise the Global Descriptor Table (GDT) */

BOOT_CODE void
init_gdt(gdt_entry_t* gdt, tss_t* tss)
{
    uint32_t tss_addr = (uint32_t)tss;

    /* Set the NULL descriptor */
    gdt[GDT_NULL] = gdt_entry_gdt_null_new();

    /* 4GB flat kernel code segment on ring 0 descriptor */
    gdt[GDT_CS_0] = gdt_entry_gdt_code_new(
                        0,      /* Base high 8 bits             */
                        1,      /* Granularity                  */
                        1,      /* Operation size               */
                        0,      /* Available                    */
                        0xf,    /* Segment limit high 4 bits    */
                        1,      /* Present                      */
                        0,      /* Descriptor privilege level   */
                        1,      /* Readable                     */
                        1,      /* Accessed                     */
                        0,      /* Base middle 8 bits           */
                        0,      /* Base low 16 bits             */
                        0xffff  /* Segment limit low 16 bits    */
                    );

    /* 4GB flat kernel data segment on ring 0 descriptor */
    gdt[GDT_DS_0] = gdt_entry_gdt_data_new(
                        0,      /* Base high 8 bits             */
                        1,      /* Granularity                  */
                        1,      /* Operation size               */
                        0,      /* Available                    */
                        0xf,    /* Segment limit high 4 bits    */
                        1,      /* Present                      */
                        0,      /* Descriptor privilege level   */
                        1,      /* Writable                     */
                        1,      /* Accessed                     */
                        0,      /* Base middle 8 bits           */
                        0,      /* Base low 16 bits             */
                        0xffff  /* Segment limit low 16 bits    */
                    );

    /* 4GB flat userland code segment on ring 3 descriptor */
    gdt[GDT_CS_3] = gdt_entry_gdt_code_new(
                        0,      /* Base high 8 bits             */
                        1,      /* Granularity                  */
                        1,      /* Operation size               */
                        0,      /* Available                    */
                        0xf,    /* Segment limit high 4 bits    */
                        1,      /* Present                      */
                        3,      /* Descriptor privilege level   */
                        1,      /* Readable                     */
                        1,      /* Accessed                     */
                        0,      /* Base middle 8 bits           */
                        0,      /* Base low 16 bits             */
                        0xffff  /* Segment limit low 16 bits    */
                    );

    /* 4GB flat userland data segment on ring 3 descriptor */
    gdt[GDT_DS_3] = gdt_entry_gdt_data_new(
                        0,      /* Base high 8 bits             */
                        1,      /* Granularity                  */
                        1,      /* Operation size               */
                        0,      /* Available                    */
                        0xf,    /* Segment limit high 4 bits    */
                        1,      /* Present                      */
                        3,      /* Descriptor privilege level   */
                        1,      /* Writable                     */
                        1,      /* Accessed                     */
                        0,      /* Base middle 8 bits           */
                        0,      /* Base low 16 bits             */
                        0xffff  /* Segment limit low 16 bits    */
                    );

    /* Task State Segment (TSS) descriptor */
    gdt[GDT_TSS] = gdt_entry_gdt_tss_new(
                       tss_addr >> 24,              /* base_high 8 bits     */
                       0,                           /* granularity          */
                       0,                           /* avl                  */
                       0,                           /* limit_high 4 bits    */
                       1,                           /* present              */
                       0,                           /* dpl                  */
                       0,                           /* busy                 */
                       1,                           /* always_true          */
                       (tss_addr >> 16) & 0xff,     /* base_mid 8 bits      */
                       (tss_addr & 0xffff),         /* base_low 16 bits     */
                       sizeof(tss_io_t) - 1         /* limit_low 16 bits    */
                   );
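
    /* Unlike the flat segments above, the TSS descriptor uses byte
     * granularity (granularity = 0) and a limit of sizeof(tss_io_t) - 1,
     * so it covers exactly the TSS together with its I/O permission bitmap. */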

    /* pre-init the userland data segment used for TLS */
    gdt[GDT_TLS] = gdt_entry_gdt_data_new(
                       0,      /* Base high 8 bits             */
                       1,      /* Granularity                  */
                       1,      /* Operation size               */
                       0,      /* Available                    */
                       0xf,    /* Segment limit high 4 bits    */
                       1,      /* Present                      */
                       3,      /* Descriptor privilege level   */
                       1,      /* Writable                     */
                       1,      /* Accessed                     */
                       0,      /* Base middle 8 bits           */
                       0,      /* Base low 16 bits             */
                       0xffff  /* Segment limit low 16 bits    */
                   );

    /* pre-init the userland data segment used for the IPC buffer */
    gdt[GDT_IPCBUF] = gdt_entry_gdt_data_new(
                          0,      /* Base high 8 bits             */
                          1,      /* Granularity                  */
                          1,      /* Operation size               */
                          0,      /* Available                    */
                          0xf,    /* Segment limit high 4 bits    */
                          1,      /* Present                      */
                          3,      /* Descriptor privilege level   */
                          1,      /* Writable                     */
                          1,      /* Accessed                     */
                          0,      /* Base middle 8 bits           */
                          0,      /* Base low 16 bits             */
                          0xffff  /* Segment limit low 16 bits    */
                      );
}
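
/* For reference, the hardware reassembles each descriptor's split fields as
 *   base  = base_high << 24 | base_mid << 16 | base_low
 *   limit = limit_high << 16 | limit_low
 * and with granularity = 1 the limit is counted in 4 KiB units, so a limit of
 * 0xfffff covers (0xfffff + 1) * 4 KiB = 4 GiB, giving the flat segments above. */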

/* initialise the Interrupt Descriptor Table (IDT) */

BOOT_CODE void
init_idt_entry(idt_entry_t* idt, interrupt_t interrupt, void (*handler)(void))
{
    uint32_t handler_addr = (uint32_t)handler;
    uint32_t dpl = 3;

    if (interrupt < int_trap_min && interrupt != int_software_break_request) {
        dpl = 0;
    }

    idt[interrupt] = idt_entry_interrupt_gate_new(
                         handler_addr >> 16,   /* offset_high  */
                         1,                    /* present      */
                         dpl,                  /* dpl          */
                         1,                    /* gate_size    */
                         SEL_CS_0,             /* seg_selector */
                         handler_addr & 0xffff /* offset_low   */
                     );
}
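
/* Gates below int_trap_min are hardware exceptions and IRQs; giving them
 * DPL 0 prevents userland from raising them directly with an 'int n'
 * instruction. The software break request and the gates at int_trap_min
 * and above keep DPL 3 so that userland can enter them. */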

BOOT_CODE bool_t
map_kernel_window(
    uint32_t num_ioapic,
    paddr_t* ioapic_paddrs,
    uint32_t num_drhu,
    paddr_t* drhu_list
)
{
    paddr_t  phys;
    uint32_t idx;
    pde_t    pde;
    pte_t    pte;
    unsigned int UNUSED i;

    /* Map PPTR_BASE (virtual) to the kernel's PADDR_BASE (physical), up to
     * the end of the virtual address space, except for the last large page.
     */
    phys = PADDR_BASE;
    idx = PPTR_BASE >> LARGE_PAGE_BITS;

    /* PPTR_TOP differs depending on whether CONFIG_BENCHMARK_USE_KERNEL_LOG_BUFFER
     * is enabled.
     */
    while (idx < (PPTR_TOP >> LARGE_PAGE_BITS)) {
        pde = pde_pde_large_new(
                  phys,   /* page_base_address    */
                  0,      /* pat                  */
                  0,      /* avl                  */
                  1,      /* global               */
                  0,      /* dirty                */
                  0,      /* accessed             */
                  0,      /* cache_disabled       */
                  0,      /* write_through        */
                  0,      /* super_user           */
                  1,      /* read_write           */
                  1       /* present              */
              );
        ia32KSGlobalPD[idx] = pde;
        phys += BIT(LARGE_PAGE_BITS);
        idx++;
    }
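
    /* Each iteration above maps one large page (4 MiB, assuming the usual
     * non-PAE ia32 configuration where LARGE_PAGE_BITS is 22), advancing
     * phys and idx in lock-step so PPTR_BASE + off maps to PADDR_BASE + off. */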

    /* crosscheck whether we have mapped correctly so far */
    assert(phys == PADDR_TOP);

#ifdef CONFIG_BENCHMARK_USE_KERNEL_LOG_BUFFER
    /* map the global page table for the log buffer */
    pde = pde_pde_pt_new(
              pptr_to_paddr(ia32KSGlobalLogPT), /* pt_base_address  */
              0,                 /* avl              */
              0,                 /* accessed         */
              0,                 /* cache_disabled   */
              0,                 /* write_through    */
              0,                 /* super_user       */
              1,                 /* read_write       */
              1                  /* present          */
          );

    ia32KSGlobalPD[idx] = pde;
    phys += BIT(LARGE_PAGE_BITS);
    assert(idx == (KS_LOG_PPTR >> LARGE_PAGE_BITS));
    idx++;
#endif /* CONFIG_BENCHMARK_USE_KERNEL_LOG_BUFFER */

#ifdef ENABLE_SMP_SUPPORT
    /* initialise the TLB bitmap */
    tlb_bitmap_init(ia32KSGlobalPD);

    phys += TLBBITMAP_PD_RESERVED;
    idx += TLBBITMAP_ROOT_ENTRIES;
#endif /* ENABLE_SMP_SUPPORT */

    /* map the page table of the last 4M of the virtual address space into the page directory */
    pde = pde_pde_pt_new(
              pptr_to_paddr(ia32KSGlobalPT), /* pt_base_address  */
              0,                 /* avl              */
              0,                 /* accessed         */
              0,                 /* cache_disabled   */
              0,                 /* write_through    */
              0,                 /* super_user       */
              1,                 /* read_write       */
              1                  /* present          */
          );
    ia32KSGlobalPD[idx] = pde;

    /* Start with an empty guard page preceding the stack. */
    idx = 0;
    pte = pte_new(
              0,      /* page_base_address    */
              0,      /* avl                  */
              0,      /* global               */
              0,      /* pat                  */
              0,      /* dirty                */
              0,      /* accessed             */
              0,      /* cache_disabled       */
              0,      /* write_through        */
              0,      /* super_user           */
              0,      /* read_write           */
              0       /* present              */
          );
    ia32KSGlobalPT[idx] = pte;
    idx++;

    /* null mappings up to PPTR_KDEV */
    while (idx < (PPTR_KDEV & MASK(LARGE_PAGE_BITS)) >> PAGE_BITS) {
        pte = pte_new(
                  0,      /* page_base_address    */
                  0,      /* avl                  */
                  0,      /* global               */
                  0,      /* pat                  */
                  0,      /* dirty                */
                  0,      /* accessed             */
                  0,      /* cache_disabled       */
                  0,      /* write_through        */
                  0,      /* super_user           */
                  0,      /* read_write           */
                  0       /* present              */
              );
        ia32KSGlobalPT[idx] = pte;
        idx++;
    }

    /* map kernel devices (devices only used by the kernel) */
    if (!map_kernel_window_devices(ia32KSGlobalPT, num_ioapic, ioapic_paddrs, num_drhu, drhu_list)) {
        return false;
    }

    invalidateLocalPageStructureCache();
    return true;
}

/* Note: this function will invalidate any pointers it previously returned */
BOOT_CODE void*
map_temp_boot_page(void* entry, uint32_t large_pages)
{
    void* replacement_vaddr;
    unsigned int i;
    unsigned int offset_in_page;

    unsigned int phys_pg_start = (unsigned int)(entry) & ~MASK(LARGE_PAGE_BITS);
    unsigned int virt_pd_start = (PPTR_BASE >> LARGE_PAGE_BITS) - large_pages;
    unsigned int virt_pg_start = PPTR_BASE - (large_pages << LARGE_PAGE_BITS);

    for (i = 0; i < large_pages; ++i) {
        unsigned int pg_offset = i << LARGE_PAGE_BITS; // byte offset of this large page from the start of the region

        *(get_boot_pd() + virt_pd_start + i) = pde_pde_large_new(
                                                   phys_pg_start + pg_offset, /* physical address */
                                                   0, /* pat            */
                                                   0, /* avl            */
                                                   1, /* global         */
                                                   0, /* dirty          */
                                                   0, /* accessed       */
                                                   0, /* cache_disabled */
                                                   0, /* write_through  */
                                                   0, /* super_user     */
                                                   1, /* read_write     */
                                                   1  /* present        */
                                               );
        invalidateLocalTranslationSingle(virt_pg_start + pg_offset);
    }

    // compute the replacement virtual address of 'entry' within the new mapping
    offset_in_page = (unsigned int)(entry) & MASK(LARGE_PAGE_BITS);
    replacement_vaddr = (void*)(virt_pg_start + offset_in_page);

    invalidateLocalPageStructureCache();

    return replacement_vaddr;
}
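
/* Worked example, assuming the common ia32 layout with PPTR_BASE at
 * 0xe0000000: map_temp_boot_page((void*)0x12345678, 1) maps the 4 MiB frame
 * at physical 0x12000000 to virtual 0xdfc00000 (just below the kernel
 * window) and returns 0xdff45678, the new virtual address of 'entry'. */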

/* initialise the CPU's descriptor table registers (GDTR, IDTR, LDTR, TR) */

BOOT_CODE void
init_dtrs(void)
{
    /* set up the GDT pointer and limit and load them into the GDTR */
    gdt_idt_ptr.limit = (sizeof(gdt_entry_t) * GDT_ENTRIES) - 1;
    gdt_idt_ptr.base = (uint32_t)x86KSGlobalState[CURRENT_CPU_INDEX()].x86KSgdt;
    ia32_install_gdt(&gdt_idt_ptr);

    /* set up the IDT pointer and limit and load them into the IDTR */
    gdt_idt_ptr.limit = (sizeof(idt_entry_t) * (int_max + 1)) - 1;
    gdt_idt_ptr.base = (uint32_t)x86KSGlobalState[CURRENT_CPU_INDEX()].x86KSidt;
    ia32_install_idt(&gdt_idt_ptr);

    /* load the NULL LDT selector into the LDTR */
    ia32_install_ldt(SEL_NULL);

    /* load the TSS selector into the Task Register (TR) */
    ia32_install_tss(SEL_TSS);

    if (config_set(CONFIG_FSGSBASE_MSR)) {
        ia32_load_gs(SEL_TLS);
        ia32_load_fs(SEL_IPCBUF);
    }
}
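
/* Note that the GDTR/IDTR limit fields are inclusive, hence the size - 1 in
 * both cases above. Loading TR also marks the TSS descriptor as busy, which
 * is why init_gdt() creates that descriptor with busy = 0. */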

static BOOT_CODE cap_t
create_it_page_table_cap(cap_t vspace_cap, pptr_t pptr, vptr_t vptr, asid_t asid)
{
    cap_t cap;
    cap = cap_page_table_cap_new(
              1,    /* capPTIsMapped      */
              asid, /* capPTMappedASID    */
              vptr, /* capPTMappedAddress */
              pptr  /* capPTBasePtr       */
          );
    if (asid != asidInvalid) {
        map_it_pt_cap(vspace_cap, cap);
    }
    return cap;
}

static BOOT_CODE cap_t
create_it_page_directory_cap(cap_t vspace_cap, pptr_t pptr, vptr_t vptr, asid_t asid)
{
    cap_t cap;
    cap = cap_page_directory_cap_new(
              true,    /* capPDIsMapped      */
              IT_ASID, /* capPDMappedASID    */
              vptr,    /* capPDMappedAddress */
              pptr     /* capPDBasePtr       */
          );
    if (asid != asidInvalid && cap_get_capType(vspace_cap) != cap_null_cap) {
        map_it_pd_cap(vspace_cap, cap);
    }
    return cap;
}
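
/* Passing a null vspace cap skips the mapping step: create_it_address_space()
 * below does this when creating the PD's own cap, since the PD is itself the
 * vspace root and is not mapped into anything. */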

/* Create an address space for the initial thread.
 * This includes the page directory and page tables. */
BOOT_CODE cap_t
create_it_address_space(cap_t root_cnode_cap, v_region_t it_v_reg)
{
    cap_t      vspace_cap;
    vptr_t     vptr;
    pptr_t     pptr;
    seL4_SlotPos slot_pos_before;
    seL4_SlotPos slot_pos_after;

    slot_pos_before = ndks_boot.slot_pos_cur;
    cap_t pd_cap;
    pptr_t pd_pptr;
    /* create the single PD object and its cap */
    pd_pptr = alloc_region(seL4_PageDirBits);
    if (!pd_pptr) {
        return cap_null_cap_new();
    }
    memzero(PDE_PTR(pd_pptr), 1 << seL4_PageDirBits);
    copyGlobalMappings((vspace_root_t*)pd_pptr);
    pd_cap = create_it_page_directory_cap(cap_null_cap_new(), pd_pptr, 0, IT_ASID);
    write_slot(SLOT_PTR(pptr_of_cap(root_cnode_cap), seL4_CapInitThreadVSpace), pd_cap);
    vspace_cap = pd_cap;

    /* create all PT objects and caps necessary to cover the userland image */
    for (vptr = ROUND_DOWN(it_v_reg.start, PT_INDEX_BITS + PAGE_BITS);
            vptr < it_v_reg.end;
            vptr += BIT(PT_INDEX_BITS + PAGE_BITS)) {
        pptr = alloc_region(seL4_PageTableBits);
        if (!pptr) {
            return cap_null_cap_new();
        }
        memzero(PTE_PTR(pptr), 1 << seL4_PageTableBits);
        if (!provide_cap(root_cnode_cap,
                         create_it_page_table_cap(vspace_cap, pptr, vptr, IT_ASID))
           ) {
            return cap_null_cap_new();
        }
    }

    slot_pos_after = ndks_boot.slot_pos_cur;
    ndks_boot.bi_frame->userImagePaging = (seL4_SlotRegion) {
        slot_pos_before, slot_pos_after
    };

    return vspace_cap;
}
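
/* Each page table provided above covers BIT(PT_INDEX_BITS + PAGE_BITS) bytes
 * of the initial thread's image (4 MiB, assuming PT_INDEX_BITS == 10 and
 * PAGE_BITS == 12 on ia32), so the loop rounds it_v_reg.start down to that
 * granularity and steps through the region one table at a time. */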

static BOOT_CODE cap_t
create_it_frame_cap(pptr_t pptr, vptr_t vptr, asid_t asid, bool_t use_large, vm_page_map_type_t map_type)
{
    vm_page_size_t frame_size;

    if (use_large) {
        frame_size = X86_LargePage;
    } else {
        frame_size = X86_SmallPage;
    }

    return
        cap_frame_cap_new(
            frame_size,                    /* capFSize           */
            ASID_LOW(asid),                /* capFMappedASIDLow  */
            vptr,                          /* capFMappedAddress  */
            map_type,                      /* capFMapType        */
            false,                         /* capFIsDevice       */
            ASID_HIGH(asid),               /* capFMappedASIDHigh */
            wordFromVMRights(VMReadWrite), /* capFVMRights       */
            pptr                           /* capFBasePtr        */
        );
}

BOOT_CODE cap_t
create_unmapped_it_frame_cap(pptr_t pptr, bool_t use_large)
{
    return create_it_frame_cap(pptr, 0, asidInvalid, use_large, X86_MappingNone);
}

BOOT_CODE cap_t
create_mapped_it_frame_cap(cap_t vspace_cap, pptr_t pptr, vptr_t vptr, asid_t asid, bool_t use_large, bool_t executable UNUSED)
{
    cap_t cap = create_it_frame_cap(pptr, vptr, asid, use_large, X86_MappingVSpace);
    map_it_frame_cap(vspace_cap, cap);
    return cap;
}

/* ==================== BOOT CODE FINISHES HERE ==================== */

pde_t CONST makeUserPDELargePage(paddr_t paddr, vm_attributes_t vm_attr, vm_rights_t vm_rights)
{
    return pde_pde_large_new(
               paddr,                                          /* page_base_address    */
               vm_attributes_get_x86PATBit(vm_attr),           /* pat                  */
               0,                                              /* avl                  */
               0,                                              /* global               */
               0,                                              /* dirty                */
               0,                                              /* accessed             */
               vm_attributes_get_x86PCDBit(vm_attr),           /* cache_disabled       */
               vm_attributes_get_x86PWTBit(vm_attr),           /* write_through        */
               SuperUserFromVMRights(vm_rights),               /* super_user           */
               WritableFromVMRights(vm_rights),                /* read_write           */
               1                                               /* present              */
           );
}

pde_t CONST makeUserPDEPageTable(paddr_t paddr, vm_attributes_t vm_attr)
{
    return pde_pde_pt_new(
               paddr,                                      /* pt_base_address  */
               0,                                          /* avl              */
               0,                                          /* accessed         */
               vm_attributes_get_x86PCDBit(vm_attr),       /* cache_disabled   */
               vm_attributes_get_x86PWTBit(vm_attr),       /* write_through    */
               1,                                          /* super_user       */
               1,                                          /* read_write       */
               1                                           /* present          */
           );
}

pde_t CONST makeUserPDEInvalid(void)
{
    /* The bitfield only declares two kinds of PDE entry (page table or large
     * page), and an invalid entry should really be a third type, but we can
     * simulate it by creating an invalid (present bit 0) entry of either of
     * the defined types. */
    return pde_pde_pt_new(
               0,  /* pt_base_address  */
               0,  /* avl              */
               0,  /* accessed         */
               0,  /* cache_disabled   */
               0,  /* write_through    */
               0,  /* super_user       */
               0,  /* read_write       */
               0   /* present          */
           );
}

pte_t CONST makeUserPTE(paddr_t paddr, vm_attributes_t vm_attr, vm_rights_t vm_rights)
{
    return pte_new(
               paddr,                                          /* page_base_address    */
               0,                                              /* avl                  */
               0,                                              /* global               */
               vm_attributes_get_x86PATBit(vm_attr),           /* pat                  */
               0,                                              /* dirty                */
               0,                                              /* accessed             */
               vm_attributes_get_x86PCDBit(vm_attr),           /* cache_disabled       */
               vm_attributes_get_x86PWTBit(vm_attr),           /* write_through        */
               SuperUserFromVMRights(vm_rights),               /* super_user           */
               WritableFromVMRights(vm_rights),                /* read_write           */
               1                                               /* present              */
           );
}

pte_t CONST makeUserPTEInvalid(void)
{
    return pte_new(
               0,                   /* page_base_address    */
               0,                   /* avl                  */
               0,                   /* global               */
               0,                   /* pat                  */
               0,                   /* dirty                */
               0,                   /* accessed             */
               0,                   /* cache_disabled       */
               0,                   /* write_through        */
               0,                   /* super_user           */
               0,                   /* read_write           */
               0                    /* present              */
           );
}

void setVMRoot(tcb_t* tcb)
{
    cap_t                   threadRoot;
    vspace_root_t*          vspace_root;
    asid_t                  asid;
    findVSpaceForASID_ret_t find_ret;

    threadRoot = TCB_PTR_CTE_PTR(tcb, tcbVTable)->cap;

    vspace_root = getValidNativeRoot(threadRoot);
    if (!vspace_root) {
        SMP_COND_STATEMENT(tlb_bitmap_unset(paddr_to_pptr(getCurrentPD()), getCurrentCPUIndex());)
        setCurrentPD(pptr_to_paddr(ia32KSGlobalPD));
        return;
    }

    asid = cap_get_capMappedASID(threadRoot);
    find_ret = findVSpaceForASID(asid);
    if (find_ret.status != EXCEPTION_NONE || find_ret.vspace_root != vspace_root) {
        SMP_COND_STATEMENT(tlb_bitmap_unset(paddr_to_pptr(getCurrentPD()), getCurrentCPUIndex());)
        setCurrentPD(pptr_to_paddr(ia32KSGlobalPD));
        return;
    }

    /* only set PD if we change it, otherwise we flush the TLB needlessly */
    if (getCurrentPD() != pptr_to_paddr(vspace_root)) {
        SMP_COND_STATEMENT(tlb_bitmap_unset(paddr_to_pptr(getCurrentPD()), getCurrentCPUIndex());)
        SMP_COND_STATEMENT(tlb_bitmap_set(vspace_root, getCurrentCPUIndex());)

        setCurrentPD(pptr_to_paddr(vspace_root));
    }
}
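
/* On SMP configurations, the tlb_bitmap_set/unset calls above maintain (in
 * the page-directory entries reserved by tlb_bitmap_init()) a bitmap of the
 * cores on which each vspace is active, so that later TLB shootdowns only
 * need to target cores that may hold stale entries. */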

void hwASIDInvalidate(asid_t asid, vspace_root_t *vspace)
{
    /* 32-bit does not have PCID */
    return;
}

exception_t
decodeX86ModeMMUInvocation(
    word_t invLabel,
    word_t length,
    cptr_t cptr,
    cte_t* cte,
    cap_t cap,
    extra_caps_t excaps,
    word_t* buffer
)
{
    switch (cap_get_capType(cap)) {
    case cap_page_directory_cap:
        return decodeIA32PageDirectoryInvocation(invLabel, length, cte, cap, excaps, buffer);

    default:
        fail("Invalid arch cap type");
    }
}
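
/* On ia32 the only mode-specific MMU object is the page directory; frame,
 * page table and ASID invocations are expected to be handled by the common
 * x86 decoding code before it defers to this function. */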
bool_t modeUnmapPage(vm_page_size_t page_size, vspace_root_t *vroot, vptr_t vaddr, void *pptr)
{
    fail("Invalid page type");
    return false;
}

exception_t decodeX86ModeMapRemapPage(word_t invLabel, vm_page_size_t page_size, cte_t *cte, cap_t cap, vspace_root_t *vroot, vptr_t vaddr, paddr_t paddr, vm_rights_t vm_rights, vm_attributes_t vm_attr)
{
    fail("Invalid page type");
}

#ifdef CONFIG_BENCHMARK_USE_KERNEL_LOG_BUFFER
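/* Map a user-supplied large-page frame as the kernel log buffer: record its
 * physical address in ksUserLogBuffer and fill the global log page table so
 * that the buffer is mapped at KS_LOG_PPTR. */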
exception_t benchmark_arch_map_logBuffer(word_t frame_cptr)
{
    lookupCapAndSlot_ret_t lu_ret;
    vm_page_size_t frameSize;
    pptr_t frame_pptr;

    /* faulting section: the lookup and checks below may raise a cap fault */
    lu_ret = lookupCapAndSlot(NODE_STATE(ksCurThread), frame_cptr);

    if (unlikely(lu_ret.status != EXCEPTION_NONE)) {
        userError("Invalid cap #%lu.", frame_cptr);
        current_fault = seL4_Fault_CapFault_new(frame_cptr, false);

        return EXCEPTION_SYSCALL_ERROR;
    }

    if (cap_get_capType(lu_ret.cap) != cap_frame_cap) {
        userError("Invalid cap. The log buffer must be backed by a frame cap.");
        current_fault = seL4_Fault_CapFault_new(frame_cptr, false);

        return EXCEPTION_SYSCALL_ERROR;
    }

    frameSize = cap_frame_cap_get_capFSize(lu_ret.cap);

    if (frameSize != X86_LargePage) {
        userError("Invalid size for log buffer. The kernel expects a large-page frame.");
        current_fault = seL4_Fault_CapFault_new(frame_cptr, false);

        return EXCEPTION_SYSCALL_ERROR;
    }

    frame_pptr = cap_frame_cap_get_capFBasePtr(lu_ret.cap);

    ksUserLogBuffer = pptr_to_paddr((void *) frame_pptr);

    /* fill global log page table with mappings */
    for (int idx = 0; idx < BIT(PT_INDEX_BITS); idx++) {
        paddr_t physical_address = ksUserLogBuffer + (idx << seL4_PageBits);

        pte_t pte = pte_new(
                        physical_address,   /* page_base_address    */
                        0,                  /* avl                  */
                        1,                  /* global               */
                        VMKernelOnly,       /* pat                  */
                        0,                  /* dirty                */
                        0,                  /* accessed             */
                        0,                  /* cache_disabled       */
                        1,                  /* write_through        */
                        1,                  /* super_user           */
                        1,                  /* read_write           */
                        1                   /* present              */
                    );

        ia32KSGlobalLogPT[idx] = pte;
        invalidateTLBEntry(KS_LOG_PPTR + (idx << seL4_PageBits), MASK(ksNumCPUs));
    }

    return EXCEPTION_NONE;
}
#endif /* CONFIG_BENCHMARK_USE_KERNEL_LOG_BUFFER */
