/*
 * Copyright 2014, General Dynamics C4 Systems
 *
 * SPDX-License-Identifier: GPL-2.0-only
 */

#include <config.h>
#include <api/syscall.h>
#include <machine/io.h>
#include <kernel/boot.h>
#include <model/statedata.h>
#include <arch/kernel/vspace.h>
#include <arch/kernel/boot.h>
#include <arch/kernel/boot_sys.h>
#include <arch/api/invocation.h>
#include <benchmark/benchmark_track.h>
#include <arch/kernel/tlb_bitmap.h>
#include <mode/kernel/tlb.h>

/* 'gdt_idt_ptr' is declared globally because of a C-subset restriction.
 * It is only used in init_dtrs(), which therefore is non-reentrant.
 */
gdt_idt_ptr_t gdt_idt_ptr;

/* initialise the Task State Segment (TSS) */

BOOT_CODE void init_tss(tss_t *tss)
{
    *tss = tss_new(
               sizeof(*tss),   /* io_map_base  */
               0,              /* trap         */
               SEL_NULL,       /* sel_ldt      */
               SEL_NULL,       /* gs           */
               SEL_NULL,       /* fs           */
               SEL_NULL,       /* ds           */
               SEL_NULL,       /* ss           */
               SEL_NULL,       /* cs           */
               SEL_NULL,       /* es           */
               0,              /* edi          */
               0,              /* esi          */
               0,              /* ebp          */
               0,              /* esp          */
               0,              /* ebx          */
               0,              /* edx          */
               0,              /* ecx          */
               0,              /* eax          */
               0,              /* eflags       */
               0,              /* eip          */
               0,              /* cr3          */
               SEL_NULL,       /* ss2          */
               0,              /* esp2         */
               SEL_NULL,       /* ss1          */
               0,              /* esp1         */
               SEL_DS_0,       /* ss0          */
               0,              /* esp0         */
               0               /* prev_task    */
           );
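    /* Fill the I/O permission bitmap with ones: in the x86 TSS, a set bit
     * denies user-level access to the corresponding I/O port, so every port
     * faults unless access is explicitly granted later. The io_map_base of
     * sizeof(*tss) above places the bitmap immediately after the TSS fields. */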
    memset(&x86KSGlobalState[CURRENT_CPU_INDEX()].x86KStss.io_map[0], 0xff,
           sizeof(x86KSGlobalState[CURRENT_CPU_INDEX()].x86KStss.io_map));
}

/* initialise the Global Descriptor Table (GDT) */

BOOT_CODE void init_gdt(gdt_entry_t *gdt, tss_t *tss)
{
    uint32_t tss_addr = (uint32_t)tss;

    /* Set the NULL descriptor */
    gdt[GDT_NULL] = gdt_entry_gdt_null_new();

    /* 4GB flat kernel code segment on ring 0 descriptor */
    gdt[GDT_CS_0] = gdt_entry_gdt_code_new(
                        0,      /* Base high 8 bits             */
                        1,      /* Granularity                  */
                        1,      /* Operation size               */
                        0,      /* Available                    */
                        0xf,    /* Segment limit high 4 bits    */
                        1,      /* Present                      */
                        0,      /* Descriptor privilege level   */
                        1,      /* readable                     */
                        1,      /* accessed                     */
                        0,      /* Base middle 8 bits           */
                        0,      /* Base low 16 bits             */
                        0xffff  /* Segment limit low 16 bits    */
                    );
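    /* Note: with granularity set, the 20-bit limit (0xfffff) is scaled by
     * 4 KiB, so this and the three flat descriptors below each cover
     * 0x100000 * 0x1000 bytes = 4 GiB from base 0, i.e. the whole 32-bit
     * address space; protection comes from paging, not segmentation. */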

    /* 4GB flat kernel data segment on ring 0 descriptor */
    gdt[GDT_DS_0] = gdt_entry_gdt_data_new(
                        0,      /* Base high 8 bits             */
                        1,      /* Granularity                  */
                        1,      /* Operation size               */
                        0,      /* Available                    */
                        0xf,    /* Segment limit high 4 bits    */
                        1,      /* Present                      */
                        0,      /* Descriptor privilege level   */
                        1,      /* writable                     */
                        1,      /* accessed                     */
                        0,      /* Base middle 8 bits           */
                        0,      /* Base low 16 bits             */
                        0xffff  /* Segment limit low 16 bits    */
                    );

    /* 4GB flat userland code segment on ring 3 descriptor */
    gdt[GDT_CS_3] = gdt_entry_gdt_code_new(
                        0,      /* Base high 8 bits             */
                        1,      /* Granularity                  */
                        1,      /* Operation size               */
                        0,      /* Available                    */
                        0xf,    /* Segment limit high 4 bits    */
                        1,      /* Present                      */
                        3,      /* Descriptor privilege level   */
                        1,      /* readable                     */
                        1,      /* accessed                     */
                        0,      /* Base middle 8 bits           */
                        0,      /* Base low 16 bits             */
                        0xffff  /* Segment limit low 16 bits    */
                    );

    /* 4GB flat userland data segment on ring 3 descriptor */
    gdt[GDT_DS_3] = gdt_entry_gdt_data_new(
                        0,      /* Base high 8 bits             */
                        1,      /* Granularity                  */
                        1,      /* Operation size               */
                        0,      /* Available                    */
                        0xf,    /* Segment limit high 4 bits    */
                        1,      /* Present                      */
                        3,      /* Descriptor privilege level   */
                        1,      /* writable                     */
                        1,      /* accessed                     */
                        0,      /* Base middle 8 bits           */
                        0,      /* Base low 16 bits             */
                        0xffff  /* Segment limit low 16 bits    */
                    );

    /* Task State Segment (TSS) descriptor */
    gdt[GDT_TSS] = gdt_entry_gdt_tss_new(
                       tss_addr >> 24,              /* base_high 8 bits     */
                       0,                           /* granularity          */
                       0,                           /* avl                  */
                       0,                           /* limit_high 4 bits    */
                       1,                           /* present              */
                       0,                           /* dpl                  */
                       0,                           /* busy                 */
                       1,                           /* always_true          */
                       (tss_addr >> 16) & 0xff,     /* base_mid 8 bits      */
                       (tss_addr & 0xffff),         /* base_low 16 bits     */
                       sizeof(tss_io_t) - 1         /* limit_low 16 bits    */
                   );
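    /* The 32-bit TSS base is scattered across base_low/base_mid/base_high as
     * the descriptor format requires, and the byte-granular limit of
     * sizeof(tss_io_t) - 1 covers the TSS plus its I/O permission bitmap. */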

    gdt[GDT_FS] = gdt_entry_gdt_data_new(
                      0,      /* Base high 8 bits             */
                      1,      /* Granularity                  */
                      1,      /* Operation size               */
                      0,      /* Available                    */
                      0xf,    /* Segment limit high 4 bits    */
                      1,      /* Present                      */
                      3,      /* Descriptor privilege level   */
                      1,      /* writable                     */
                      1,      /* accessed                     */
                      0,      /* Base middle 8 bits           */
                      0,      /* Base low 16 bits             */
                      0xffff  /* Segment limit low 16 bits    */
                  );

    gdt[GDT_GS] = gdt_entry_gdt_data_new(
                      0,      /* Base high 8 bits             */
                      1,      /* Granularity                  */
                      1,      /* Operation size               */
                      0,      /* Available                    */
                      0xf,    /* Segment limit high 4 bits    */
                      1,      /* Present                      */
                      3,      /* Descriptor privilege level   */
                      1,      /* writable                     */
                      1,      /* accessed                     */
                      0,      /* Base middle 8 bits           */
                      0,      /* Base low 16 bits             */
                      0xffff  /* Segment limit low 16 bits    */
                  );
}

/* initialise the Interrupt Descriptor Table (IDT) */

BOOT_CODE void init_idt_entry(idt_entry_t *idt, interrupt_t interrupt, void (*handler)(void))
{
    uint32_t handler_addr = (uint32_t)handler;
    uint32_t dpl = 3;

    if (interrupt < int_trap_min && interrupt != int_software_break_request) {
        dpl = 0;
    }
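    /* Hardware exception and interrupt vectors get DPL 0 so user code cannot
     * raise them directly with an 'int' instruction; software trap vectors
     * (from int_trap_min up) and the software break request remain DPL 3 and
     * thus callable from user level. */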

    idt[interrupt] = idt_entry_interrupt_gate_new(
                         handler_addr >> 16,   /* offset_high  */
                         1,                    /* present      */
                         dpl,                  /* dpl          */
                         1,                    /* gate_size    */
                         SEL_CS_0,             /* seg_selector */
                         handler_addr & 0xffff /* offset_low   */
                     );
}

BOOT_CODE bool_t map_kernel_window(
    uint32_t num_ioapic,
    paddr_t   *ioapic_paddrs,
    uint32_t   num_drhu,
    paddr_t   *drhu_list
)
{
    paddr_t  phys;
    uint32_t idx;
    pde_t    pde;
    pte_t    pte;
    unsigned int UNUSED i;

    /* Mapping of PPTR_BASE (virtual address) to kernel's PADDR_BASE
     * up to end of virtual address space except for the last large page.
     */
    phys = PADDR_BASE;
    idx = PPTR_BASE >> LARGE_PAGE_BITS;

    /* PPTR_TOP differs depending on whether CONFIG_KERNEL_LOG_BUFFER
     * is enabled.
     */
    while (idx < (PPTR_TOP >> LARGE_PAGE_BITS)) {
        pde = pde_pde_large_new(
                  phys,   /* page_base_address    */
                  0,      /* pat                  */
                  0,      /* avl                  */
                  1,      /* global               */
                  0,      /* dirty                */
                  0,      /* accessed             */
                  0,      /* cache_disabled       */
                  0,      /* write_through        */
                  0,      /* super_user           */
                  1,      /* read_write           */
                  1       /* present              */
              );
        ia32KSGlobalPD[idx] = pde;
        phys += BIT(LARGE_PAGE_BITS);
        idx++;
    }
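    /* Each iteration above mapped one large page (BIT(LARGE_PAGE_BITS) bytes,
     * 4 MiB on ia32), advancing phys in lock-step with the page-directory
     * index, so phys must now sit exactly at PADDR_TOP. */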

    /* crosscheck whether we have mapped correctly so far */
    assert(phys == PADDR_TOP);

#ifdef CONFIG_KERNEL_LOG_BUFFER
    /* Map global page table for the log buffer */
    pde = pde_pde_pt_new(
              pptr_to_paddr(ia32KSGlobalLogPT), /* pt_base_address  */
              0,                 /* avl              */
              0,                 /* accessed         */
              0,                 /* cache_disabled   */
              0,                 /* write_through    */
              0,                 /* super_user       */
              1,                 /* read_write       */
              1                  /* present          */
          );

    ia32KSGlobalPD[idx] = pde;
    phys += BIT(LARGE_PAGE_BITS);
    assert(idx == (KS_LOG_PPTR >> LARGE_PAGE_BITS));
    idx++;
#endif /* CONFIG_KERNEL_LOG_BUFFER */

#ifdef ENABLE_SMP_SUPPORT
    /* initialise the TLB bitmap */
    tlb_bitmap_init(ia32KSGlobalPD);

    phys += TLBBITMAP_PD_RESERVED;
    idx += TLBBITMAP_ROOT_ENTRIES;
#endif /* ENABLE_SMP_SUPPORT */

    /* map page table of last 4M of virtual address space to page directory */
    pde = pde_pde_pt_new(
              pptr_to_paddr(ia32KSGlobalPT), /* pt_base_address  */
              0,                 /* avl              */
              0,                 /* accessed         */
              0,                 /* cache_disabled   */
              0,                 /* write_through    */
              0,                 /* super_user       */
              1,                 /* read_write       */
              1                  /* present          */
          );
    ia32KSGlobalPD[idx] = pde;
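    /* Everything from here down lives in that last 4 MiB and needs 4 KiB
     * granularity: a guard page, null mappings, and the kernel device
     * mappings, all entered into ia32KSGlobalPT rather than the PD. */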

    /* Start with an empty guard page preceding the stack. */
    idx = 0;
    pte = pte_new(
              0,      /* page_base_address    */
              0,      /* avl                  */
              0,      /* global               */
              0,      /* pat                  */
              0,      /* dirty                */
              0,      /* accessed             */
              0,      /* cache_disabled       */
              0,      /* write_through        */
              0,      /* super_user           */
              0,      /* read_write           */
              0       /* present              */
          );
    ia32KSGlobalPT[idx] = pte;
    idx++;

    /* null mappings up to KDEV_BASE */

    while (idx < (KDEV_BASE & MASK(LARGE_PAGE_BITS)) >> PAGE_BITS) {
        pte = pte_new(
                  0,      /* page_base_address    */
                  0,      /* avl                  */
                  0,      /* global               */
                  0,      /* pat                  */
                  0,      /* dirty                */
                  0,      /* accessed             */
                  0,      /* cache_disabled       */
                  0,      /* write_through        */
                  0,      /* super_user           */
                  0,      /* read_write           */
                  0       /* present              */
              );
        ia32KSGlobalPT[idx] = pte;
        idx++;
    }

    /* map kernel devices (devices only used by the kernel) */
    if (!map_kernel_window_devices(ia32KSGlobalPT, num_ioapic, ioapic_paddrs, num_drhu, drhu_list)) {
        return false;
    }

    invalidateLocalPageStructureCache();
    return true;
}

/* Note: this function invalidates any pointers it has previously returned */
BOOT_CODE void *map_temp_boot_page(void *entry, uint32_t large_pages)
{
    void *replacement_vaddr;
    unsigned int i;
    unsigned int offset_in_page;

    unsigned int phys_pg_start = (unsigned int)(entry) & ~MASK(LARGE_PAGE_BITS);
    unsigned int virt_pd_start = (PPTR_BASE >> LARGE_PAGE_BITS) - large_pages;
    unsigned int virt_pg_start = PPTR_BASE - (large_pages << LARGE_PAGE_BITS);
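    /* Each boot-PD slot maps BIT(LARGE_PAGE_BITS) bytes, so
     * PPTR_BASE >> LARGE_PAGE_BITS is the slot where the kernel window
     * starts; the temporary mapping takes the 'large_pages' slots (and the
     * matching virtual addresses) immediately below that window. */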

    for (i = 0; i < large_pages; i++) {
        unsigned int pg_offset = i << LARGE_PAGE_BITS; // byte offset of large page i from the start

        *(get_boot_pd() + virt_pd_start + i) = pde_pde_large_new(
                                                   phys_pg_start + pg_offset, /* physical address */
                                                   0, /* pat            */
                                                   0, /* avl            */
                                                   1, /* global         */
                                                   0, /* dirty          */
                                                   0, /* accessed       */
                                                   0, /* cache_disabled */
                                                   0, /* write_through  */
                                                   0, /* super_user     */
                                                   1, /* read_write     */
                                                   1  /* present        */
                                               );
        invalidateLocalTranslationSingle(virt_pg_start + pg_offset);
    }

    // compute the replacement virtual address of 'entry'
    offset_in_page = (unsigned int)(entry) & MASK(LARGE_PAGE_BITS);
    replacement_vaddr = (void *)(virt_pg_start + offset_in_page);

    invalidateLocalPageStructureCache();

    return replacement_vaddr;
}

/* initialise CPU's descriptor table registers (GDTR, IDTR, LDTR, TR) */

BOOT_CODE void init_dtrs(void)
{
    /* setup the GDT pointer and limit and load into GDTR */
    gdt_idt_ptr.limit = (sizeof(gdt_entry_t) * GDT_ENTRIES) - 1;
    gdt_idt_ptr.base = (uint32_t)x86KSGlobalState[CURRENT_CPU_INDEX()].x86KSgdt;
    ia32_install_gdt(&gdt_idt_ptr);

    /* setup the IDT pointer and limit and load into IDTR */
    gdt_idt_ptr.limit = (sizeof(idt_entry_t) * (int_max + 1)) - 1;
    gdt_idt_ptr.base = (uint32_t)x86KSGlobalState[CURRENT_CPU_INDEX()].x86KSidt;
    ia32_install_idt(&gdt_idt_ptr);
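    /* Both limits follow the x86 convention for GDTR/IDTR: the byte offset
     * of the last valid byte, i.e. the table size minus one. */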

    /* load NULL LDT selector into LDTR */
    ia32_install_ldt(SEL_NULL);

    /* load TSS selector into Task Register (TR) */
    ia32_install_tss(SEL_TSS);
}

static BOOT_CODE cap_t create_it_page_table_cap(cap_t vspace_cap, pptr_t pptr, vptr_t vptr, asid_t asid)
{
    cap_t cap;
    cap = cap_page_table_cap_new(
              1,    /* capPTIsMapped      */
              asid, /* capPTMappedASID    */
              vptr, /* capPTMappedAddress */
              pptr  /* capPTBasePtr       */
          );
    if (asid != asidInvalid) {
        map_it_pt_cap(vspace_cap, cap);
    }
    return cap;
}

static BOOT_CODE cap_t create_it_page_directory_cap(cap_t vspace_cap, pptr_t pptr, vptr_t vptr, asid_t asid)
{
    cap_t cap;
    cap = cap_page_directory_cap_new(
              true,    /* capPDIsMapped      */
              IT_ASID, /* capPDMappedASID    */
              vptr,    /* capPDMappedAddress */
              pptr     /* capPDBasePtr       */
          );
    if (asid != asidInvalid && cap_get_capType(vspace_cap) != cap_null_cap) {
        map_it_pd_cap(vspace_cap, cap);
    }
    return cap;
}

BOOT_CODE word_t arch_get_n_paging(v_region_t it_v_reg)
{
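    /* One page table maps BIT(PT_INDEX_BITS + PAGE_BITS) bytes of virtual
     * address space (4 MiB on ia32), so this counts one PT per such aligned
     * chunk that the initial thread's virtual region touches. */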
    word_t n = get_n_paging(it_v_reg, PT_INDEX_BITS + PAGE_BITS);
#ifdef CONFIG_IOMMU
    n += vtd_get_n_paging(&boot_state.rmrr_list);
#endif
    return n;
}

/* Create an address space for the initial thread.
 * This includes the page directory and page tables */
BOOT_CODE cap_t create_it_address_space(cap_t root_cnode_cap, v_region_t it_v_reg)
{
    cap_t      vspace_cap;
    vptr_t     vptr;
    seL4_SlotPos slot_pos_before;
    seL4_SlotPos slot_pos_after;

    slot_pos_before = ndks_boot.slot_pos_cur;
    copyGlobalMappings((vspace_root_t *)rootserver.vspace);
    cap_t pd_cap = create_it_page_directory_cap(cap_null_cap_new(), rootserver.vspace, 0, IT_ASID);
    write_slot(SLOT_PTR(pptr_of_cap(root_cnode_cap), seL4_CapInitThreadVSpace), pd_cap);
    vspace_cap = pd_cap;

    /* create all PT objects and caps necessary to cover the userland image */

    for (vptr = ROUND_DOWN(it_v_reg.start, PT_INDEX_BITS + PAGE_BITS);
         vptr < it_v_reg.end;
         vptr += BIT(PT_INDEX_BITS + PAGE_BITS)) {
        if (!provide_cap(root_cnode_cap,
                         create_it_page_table_cap(vspace_cap, it_alloc_paging(), vptr, IT_ASID))
           ) {
            return cap_null_cap_new();
        }
    }

    slot_pos_after = ndks_boot.slot_pos_cur;
    ndks_boot.bi_frame->userImagePaging = (seL4_SlotRegion) {
        slot_pos_before, slot_pos_after
    };

    return vspace_cap;
}

static BOOT_CODE cap_t create_it_frame_cap(pptr_t pptr, vptr_t vptr, asid_t asid, bool_t use_large,
                                           vm_page_map_type_t map_type)
{
    vm_page_size_t frame_size;

    if (use_large) {
        frame_size = X86_LargePage;
    } else {
        frame_size = X86_SmallPage;
    }

    return
        cap_frame_cap_new(
            frame_size,                    /* capFSize           */
            ASID_LOW(asid),                /* capFMappedASIDLow  */
            vptr,                          /* capFMappedAddress  */
            map_type,                      /* capFMapType        */
            false,                         /* capFIsDevice       */
            ASID_HIGH(asid),               /* capFMappedASIDHigh */
            wordFromVMRights(VMReadWrite), /* capFVMRights       */
            pptr                           /* capFBasePtr        */
        );
}

BOOT_CODE cap_t create_unmapped_it_frame_cap(pptr_t pptr, bool_t use_large)
{
    return create_it_frame_cap(pptr, 0, asidInvalid, use_large, X86_MappingNone);
}

BOOT_CODE cap_t create_mapped_it_frame_cap(cap_t vspace_cap, pptr_t pptr, vptr_t vptr, asid_t asid, bool_t use_large,
                                           bool_t executable UNUSED)
{
    cap_t cap = create_it_frame_cap(pptr, vptr, asid, use_large, X86_MappingVSpace);
    map_it_frame_cap(vspace_cap, cap);
    return cap;
}

/* ==================== BOOT CODE FINISHES HERE ==================== */

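/* The PDE/PTE constructors below translate seL4 vm_attributes and VM rights
 * into x86 paging bits. Note that the write_through (PWT), cache_disabled
 * (PCD) and pat bits together form the 3-bit index into the PAT MSR that
 * selects the memory type for the mapping. */
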
pde_t CONST makeUserPDELargePage(paddr_t paddr, vm_attributes_t vm_attr, vm_rights_t vm_rights)
{
    return pde_pde_large_new(
               paddr,                                          /* page_base_address    */
               vm_attributes_get_x86PATBit(vm_attr),           /* pat                  */
               0,                                              /* avl                  */
               0,                                              /* global               */
               0,                                              /* dirty                */
               0,                                              /* accessed             */
               vm_attributes_get_x86PCDBit(vm_attr),           /* cache_disabled       */
               vm_attributes_get_x86PWTBit(vm_attr),           /* write_through        */
               SuperUserFromVMRights(vm_rights),               /* super_user           */
               WritableFromVMRights(vm_rights),                /* read_write           */
               1                                               /* present              */
           );
}

pde_t CONST makeUserPDEPageTable(paddr_t paddr, vm_attributes_t vm_attr)
{
    return pde_pde_pt_new(
               paddr,                                      /* pt_base_address  */
               0,                                          /* avl              */
               0,                                          /* accessed         */
               vm_attributes_get_x86PCDBit(vm_attr),       /* cache_disabled   */
               vm_attributes_get_x86PWTBit(vm_attr),       /* write_through    */
               1,                                          /* super_user       */
               1,                                          /* read_write       */
               1                                           /* present          */
           );
}

pde_t CONST makeUserPDEInvalid(void)
{
    /* The bitfield only declares two kinds of PDE entries (page tables or large pages)
     * and an invalid entry should really be a third type, but we can simulate it by
     * creating an invalid (present bit 0) entry of either of the defined types */
    return pde_pde_pt_new(
               0,  /* pt_base_address  */
               0,  /* avl              */
               0,  /* accessed         */
               0,  /* cache_disabled   */
               0,  /* write_through    */
               0,  /* super_user       */
               0,  /* read_write       */
               0   /* present          */
           );
}

pte_t CONST makeUserPTE(paddr_t paddr, vm_attributes_t vm_attr, vm_rights_t vm_rights)
{
    return pte_new(
               paddr,                                          /* page_base_address    */
               0,                                              /* avl                  */
               0,                                              /* global               */
               vm_attributes_get_x86PATBit(vm_attr),           /* pat                  */
               0,                                              /* dirty                */
               0,                                              /* accessed             */
               vm_attributes_get_x86PCDBit(vm_attr),           /* cache_disabled       */
               vm_attributes_get_x86PWTBit(vm_attr),           /* write_through        */
               SuperUserFromVMRights(vm_rights),               /* super_user           */
               WritableFromVMRights(vm_rights),                /* read_write           */
               1                                               /* present              */
           );
}

pte_t CONST makeUserPTEInvalid(void)
{
    return pte_new(
               0,                   /* page_base_address    */
               0,                   /* avl                  */
               0,                   /* global               */
               0,                   /* pat                  */
               0,                   /* dirty                */
               0,                   /* accessed             */
               0,                   /* cache_disabled       */
               0,                   /* write_through        */
               0,                   /* super_user           */
               0,                   /* read_write           */
               0                    /* present              */
           );
}

void setVMRoot(tcb_t *tcb)
{
    cap_t               threadRoot;
    vspace_root_t      *vspace_root;
    asid_t              asid;
    findVSpaceForASID_ret_t find_ret;

    threadRoot = TCB_PTR_CTE_PTR(tcb, tcbVTable)->cap;

    vspace_root = getValidNativeRoot(threadRoot);
    if (!vspace_root) {
        SMP_COND_STATEMENT(tlb_bitmap_unset(paddr_to_pptr(getCurrentPD()), getCurrentCPUIndex());)
        setCurrentPD(pptr_to_paddr(ia32KSGlobalPD));
        return;
    }

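    /* Revalidate the ASID binding: the ASID recorded in the cap may have been
     * freed or rebound to a different vspace since the cap was installed, in
     * which case we must fall back to the kernel-only address space. */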
    asid = cap_get_capMappedASID(threadRoot);
    find_ret = findVSpaceForASID(asid);
    if (find_ret.status != EXCEPTION_NONE || find_ret.vspace_root != vspace_root) {
        SMP_COND_STATEMENT(tlb_bitmap_unset(paddr_to_pptr(getCurrentPD()), getCurrentCPUIndex());)
        setCurrentPD(pptr_to_paddr(ia32KSGlobalPD));
        return;
    }

    /* only set PD if we change it, otherwise we flush the TLB needlessly */
    if (getCurrentPD() != pptr_to_paddr(vspace_root)) {
        SMP_COND_STATEMENT(tlb_bitmap_unset(paddr_to_pptr(getCurrentPD()), getCurrentCPUIndex());)
        SMP_COND_STATEMENT(tlb_bitmap_set(vspace_root, getCurrentCPUIndex());)

        setCurrentPD(pptr_to_paddr(vspace_root));
    }
}

void hwASIDInvalidate(asid_t asid, vspace_root_t *vspace)
{
    /* 32-bit does not have PCID */
    return;
}

exception_t decodeX86ModeMMUInvocation(
    word_t invLabel,
    word_t length,
    cptr_t cptr,
    cte_t *cte,
    cap_t cap,
    extra_caps_t excaps,
    word_t *buffer
)
{
    switch (cap_get_capType(cap)) {
    case cap_page_directory_cap:
        return decodeIA32PageDirectoryInvocation(invLabel, length, cte, cap, excaps, buffer);

    default:
        fail("Invalid arch cap type");
    }
}

bool_t modeUnmapPage(vm_page_size_t page_size, vspace_root_t *vroot, vptr_t vaddr, void *pptr)
{
    fail("Invalid page type");
    return false;
}

exception_t decodeX86ModeMapPage(word_t invLabel, vm_page_size_t page_size, cte_t *cte, cap_t cap,
                                 vspace_root_t *vroot, vptr_t vaddr, paddr_t paddr, vm_rights_t vm_rights, vm_attributes_t vm_attr)
{
    fail("Invalid page type");
}

#ifdef CONFIG_KERNEL_LOG_BUFFER
exception_t benchmark_arch_map_logBuffer(word_t frame_cptr)
{
    lookupCapAndSlot_ret_t lu_ret;
    vm_page_size_t frameSize;
    pptr_t frame_pptr;

    /* faulting section */
    lu_ret = lookupCapAndSlot(NODE_STATE(ksCurThread), frame_cptr);

    if (unlikely(lu_ret.status != EXCEPTION_NONE)) {
        userError("Invalid cap #%lu.", frame_cptr);
        current_fault = seL4_Fault_CapFault_new(frame_cptr, false);

        return EXCEPTION_SYSCALL_ERROR;
    }

    if (cap_get_capType(lu_ret.cap) != cap_frame_cap) {
        userError("Invalid cap. The log buffer must be backed by a frame cap.");
        current_fault = seL4_Fault_CapFault_new(frame_cptr, false);

        return EXCEPTION_SYSCALL_ERROR;
    }

    frameSize = cap_frame_cap_get_capFSize(lu_ret.cap);

    if (frameSize != X86_LargePage) {
        userError("Invalid size for log buffer. The kernel expects at least a 1M log buffer.");
        current_fault = seL4_Fault_CapFault_new(frame_cptr, false);

        return EXCEPTION_SYSCALL_ERROR;
    }

    frame_pptr = cap_frame_cap_get_capFBasePtr(lu_ret.cap);

    ksUserLogBuffer = pptr_to_paddr((void *) frame_pptr);

    /* fill global log page table with mappings */
    for (int idx = 0; idx < BIT(PT_INDEX_BITS); idx++) {
        paddr_t physical_address = ksUserLogBuffer + (idx << seL4_PageBits);

        pte_t pte = pte_new(
                        physical_address,   /* page_base_address    */
                        0,                  /* avl                  */
                        1,                  /* global               */
                        VMKernelOnly,       /* pat                  */
                        0,                  /* dirty                */
                        0,                  /* accessed             */
                        0,                  /* cache_disabled       */
                        1,                  /* write_through        */
                        1,                  /* super_user           */
                        1,                  /* read_write           */
                        1                   /* present              */
                    );

        ia32KSGlobalLogPT[idx] = pte;
        invalidateTLBEntry(KS_LOG_PPTR + (idx << seL4_PageBits), MASK(ksNumCPUs));
    }
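    /* The MASK(ksNumCPUs) argument above asks invalidateTLBEntry to shoot
     * down stale translations for the log window on all active cores, not
     * just the current one. */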

    return EXCEPTION_NONE;
}
#endif /* CONFIG_KERNEL_LOG_BUFFER */