/**
 * \file
 * \brief Architecture-specific kernel page mapping code for x86-64.
 */

/*
 * Copyright (c) 2010-2013, 2016 ETH Zurich.
 * Copyright (c) 2014, HP Labs.
 * All rights reserved.
 *
 * This file is distributed under the terms in the attached LICENSE file.
 * If you do not find this file, copies can be found by writing to:
 * ETH Zurich D-INFK, Universitaetstr. 6, CH-8092 Zurich. Attn: Systems Group.
 */

#include <kernel.h>
#include <dispatch.h>
#include <target/x86_64/paging_kernel_target.h>
#include <target/x86_64/offsets_target.h>
#include <paging_kernel_arch.h>
#include <mdb/mdb_tree.h>
#include <string.h>
#include <barrelfish_kpi/init.h>
#include <cap_predicates.h>
#include <paging_generic.h>

#ifdef __k1om__
#include <target/k1om/offsets_target.h>
#define MEMORY_OFFSET K1OM_MEMORY_OFFSET
#else
#include <target/x86_64/offsets_target.h>
#define MEMORY_OFFSET X86_64_MEMORY_OFFSET
#endif

/// Map within an x86_64 non-leaf page table (PML4, PDPT, or PDIR)
static errval_t x86_64_non_ptable(struct capability *dest, cslot_t slot,
                                  struct capability *src, uintptr_t flags,
                                  uintptr_t offset, size_t pte_count,
                                  struct cte *mapping_cte)
{
    //printf("page_mappings_arch:x86_64_non_ptable\n");
    if (slot >= X86_64_PTABLE_SIZE) { // Within pagetable
        return SYS_ERR_VNODE_SLOT_INVALID;
    }

    if (type_is_vnode(src->type) && pte_count != 1) { // only allow single ptable mappings
        debug(SUBSYS_PAGING, "src type and count mismatch\n");
        return SYS_ERR_VM_MAP_SIZE;
    }

    if (slot + pte_count > X86_64_PTABLE_SIZE) { // mapping size ok
        debug(SUBSYS_PAGING, "mapping size invalid (%zd)\n", pte_count);
        return SYS_ERR_VM_MAP_SIZE;
    }

    size_t page_size = 0;
    paging_x86_64_flags_t flags_large = 0;
    switch (dest->type) {
        case ObjType_VNode_x86_64_pml4:
            if (src->type != ObjType_VNode_x86_64_pdpt) { // Right mapping
                debug(SUBSYS_PAGING, "src type invalid: %d\n", src->type);
                return SYS_ERR_WRONG_MAPPING;
            }
            if (slot >= X86_64_PML4_BASE(MEMORY_OFFSET)) { // Kernel mapped here
                return SYS_ERR_VNODE_SLOT_RESERVED;
            }
            break;
        case ObjType_VNode_x86_64_pdpt:
            // huge page support
            if (src->type != ObjType_VNode_x86_64_pdir) { // Right mapping
                if (src->type != ObjType_Frame &&
                    src->type != ObjType_DevFrame) { // Right mapping
                    debug(SUBSYS_PAGING, "src type invalid: %d\n", src->type);
                    return SYS_ERR_WRONG_MAPPING;
                }

                if (get_size(src) < HUGE_PAGE_SIZE) {
                    return SYS_ERR_VM_FRAME_TOO_SMALL;
                }

                if ((get_address(src)+offset) & HUGE_PAGE_MASK) {
                    return SYS_ERR_VM_FRAME_UNALIGNED;
                }

                // TODO: check if the system allows 1GB mappings
                page_size = X86_64_HUGE_PAGE_SIZE;
                // check offset within frame
                genpaddr_t off = offset;

                if (off + pte_count * X86_64_HUGE_PAGE_SIZE > get_size(src)) {
                    printk(LOG_NOTE, "frame offset invalid: %zx > 0x%"PRIxGENSIZE"\n",
                            off + pte_count * X86_64_HUGE_PAGE_SIZE, get_size(src));
                    return SYS_ERR_FRAME_OFFSET_INVALID;
                }
                /* Calculate page access protection flags */
                // Get frame cap rights
                flags_large = paging_x86_64_cap_to_page_flags(src->rights);
                // Mask with provided access rights mask
                flags_large = paging_x86_64_mask_attrs(flags_large, X86_64_PTABLE_ACCESS(flags));
                // Add additional arch-specific flags
                flags_large |= X86_64_PTABLE_FLAGS(flags);
                // Unconditionally mark the page present
                flags_large |= X86_64_PTABLE_PRESENT;
            }
            break;
        case ObjType_VNode_x86_64_pdir:
            // superpage support
            if (src->type != ObjType_VNode_x86_64_ptable) { // Right mapping
                if (src->type != ObjType_Frame &&
                    src->type != ObjType_DevFrame) { // Right mapping
                    debug(SUBSYS_PAGING, "src type invalid: %d\n", src->type);
                    return SYS_ERR_WRONG_MAPPING;
                }

                if (get_size(src) < LARGE_PAGE_SIZE) {
                    return SYS_ERR_VM_FRAME_TOO_SMALL;
                }

                if ((get_address(src)+offset) & LARGE_PAGE_MASK) {
                    return SYS_ERR_VM_FRAME_UNALIGNED;
                }

                page_size = X86_64_LARGE_PAGE_SIZE;

                // check offset within frame
                genpaddr_t off = offset;

                if (off + pte_count * X86_64_LARGE_PAGE_SIZE > get_size(src)) {
                    printk(LOG_NOTE, "frame offset invalid: %zx > 0x%"PRIxGENSIZE"\n",
                            off + pte_count * X86_64_LARGE_PAGE_SIZE, get_size(src));
                    return SYS_ERR_FRAME_OFFSET_INVALID;
                }
                /* Calculate page access protection flags */
                // Get frame cap rights
                flags_large = paging_x86_64_cap_to_page_flags(src->rights);
                // Mask with provided access rights mask
                flags_large = paging_x86_64_mask_attrs(flags_large, X86_64_PTABLE_ACCESS(flags));
                // Add additional arch-specific flags
                flags_large |= X86_64_PTABLE_FLAGS(flags);
                // Unconditionally mark the page present
                flags_large |= X86_64_PTABLE_PRESENT;
            }
            break;
        default:
            debug(SUBSYS_PAGING, "dest type invalid\n");
            return SYS_ERR_DEST_TYPE_INVALID;
    }

    // Convert destination base address
    genpaddr_t dest_gp   = get_address(dest);
    lpaddr_t dest_lp     = gen_phys_to_local_phys(dest_gp);
    lvaddr_t dest_lv     = local_phys_to_mem(dest_lp);
    // Convert source base address
    genpaddr_t src_gp   = get_address(src);
    lpaddr_t src_lp     = gen_phys_to_local_phys(src_gp);

    // set metadata
    create_mapping_cap(mapping_cte, src, cte_for_cap(dest),
                       slot, pte_count);

    cslot_t last_slot = slot + pte_count;
    for (; slot < last_slot; slot++, offset += page_size) {
        // Destination
        union x86_64_pdir_entry *entry = (union x86_64_pdir_entry *)dest_lv + slot;

        if (X86_64_IS_PRESENT(entry)) {
            // cleanup mapping info
            // TODO: cleanup already mapped pages
            memset(mapping_cte, 0, sizeof(*mapping_cte));
            debug(SUBSYS_PAGING, "slot in use\n");
            printk(LOG_NOTE, "slot in use: entry = 0x%016"PRIx64"\n", entry->raw);
            return SYS_ERR_VNODE_SLOT_INUSE;
        }

        // determine if we map a large/huge page or a normal entry
        if (page_size == X86_64_LARGE_PAGE_SIZE) {
            // a large page is mapped
            paging_x86_64_map_large((union x86_64_ptable_entry *)entry, src_lp + offset, flags_large);
        } else if (page_size == X86_64_HUGE_PAGE_SIZE) {
            // a huge page is mapped
            paging_x86_64_map_huge((union x86_64_ptable_entry *)entry, src_lp + offset, flags_large);
        } else {
            // a normal paging structure entry is mapped
            paging_x86_64_map_table(entry, src_lp + offset);
        }
    }

    return SYS_ERR_OK;
}

/// Map within an x86_64 leaf page table (ptable)
static errval_t x86_64_ptable(struct capability *dest, cslot_t slot,
                              struct capability *src, uintptr_t mflags,
                              uintptr_t offset, size_t pte_count,
                              struct cte *mapping_cte)
{
    //printf("page_mappings_arch:x86_64_ptable\n");
    if (slot >= X86_64_PTABLE_SIZE) { // Within pagetable
        debug(SUBSYS_PAGING, "    vnode_invalid\n");
        return SYS_ERR_VNODE_SLOT_INVALID;
    }

    if (slot + pte_count > X86_64_PTABLE_SIZE) { // mapping size ok
        debug(SUBSYS_PAGING, "mapping size invalid (%zd)\n", pte_count);
        return SYS_ERR_VM_MAP_SIZE;
    }

    if (src->type != ObjType_Frame &&
        src->type != ObjType_DevFrame) { // Right mapping
        debug(SUBSYS_PAGING, "src type invalid\n");
        return SYS_ERR_WRONG_MAPPING;
    }

    // check offset within frame
    genpaddr_t off = offset;
    if (off + pte_count * X86_64_BASE_PAGE_SIZE > get_size(src)) {
        debug(SUBSYS_PAGING, "frame offset invalid: %zx > 0x%"PRIxGENSIZE"\n",
                off + pte_count * X86_64_BASE_PAGE_SIZE, get_size(src));
        printk(LOG_NOTE, "frame offset invalid: %zx > 0x%"PRIxGENSIZE"\n",
                off + pte_count * X86_64_BASE_PAGE_SIZE, get_size(src));
        char buf[256];
        sprint_cap(buf, 256, src);
        printk(LOG_NOTE, "src = %s\n", buf);
        return SYS_ERR_FRAME_OFFSET_INVALID;
    }

    /* Calculate page access protection flags */
    // Get frame cap rights
    paging_x86_64_flags_t flags =
        paging_x86_64_cap_to_page_flags(src->rights);
    // Mask with provided access rights mask
    flags = paging_x86_64_mask_attrs(flags, X86_64_PTABLE_ACCESS(mflags));
    // Add additional arch-specific flags
    flags |= X86_64_PTABLE_FLAGS(mflags);
    // Unconditionally mark the page present
    flags |= X86_64_PTABLE_PRESENT;

    // Convert destination base address
    genpaddr_t dest_gp   = get_address(dest);
    lpaddr_t dest_lp     = gen_phys_to_local_phys(dest_gp);
    lvaddr_t dest_lv     = local_phys_to_mem(dest_lp);
    // Convert source base address
    genpaddr_t src_gp   = get_address(src);
    lpaddr_t src_lp     = gen_phys_to_local_phys(src_gp);
    // Set metadata
    create_mapping_cap(mapping_cte, src, cte_for_cap(dest),
                       slot, pte_count);

    cslot_t last_slot = slot + pte_count;
    for (; slot < last_slot; slot++, offset += X86_64_BASE_PAGE_SIZE) {
        union x86_64_ptable_entry *entry =
            (union x86_64_ptable_entry *)dest_lv + slot;

        /* FIXME: Flush TLB if the page is already present.
         * In the meantime, since we don't do this, we just fail to avoid
         * ever reusing a VA mapping. */
        if (X86_64_IS_PRESENT(entry)) {
            // TODO: cleanup already mapped pages
            memset(mapping_cte, 0, sizeof(*mapping_cte));
            printk(LOG_WARN, "Trying to remap an already-present page is NYI, but "
                   "this is most likely a user-space bug!\n");
            return SYS_ERR_VNODE_SLOT_INUSE;
        }

        // Carry out the page mapping
        paging_x86_64_map(entry, src_lp + offset, flags);
    }

    return SYS_ERR_OK;
}

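/// Signature of the per-vnode-type mapping functions dispatched via the table below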
typedef errval_t (*mapping_handler_t)(struct capability *dest_cap,
                                      cslot_t dest_slot,
                                      struct capability *src_cap,
                                      uintptr_t flags, uintptr_t offset,
                                      size_t pte_count,
                                      struct cte *mapping_cte);

/// Dispatcher table for the type of mapping to create
static mapping_handler_t handler[ObjType_Num] = {
    [ObjType_VNode_x86_64_pml4]   = x86_64_non_ptable,
    [ObjType_VNode_x86_64_pdpt]   = x86_64_non_ptable,
    [ObjType_VNode_x86_64_pdir]   = x86_64_non_ptable,
    [ObjType_VNode_x86_64_ptable] = x86_64_ptable,
};


/// Create page mappings
errval_t caps_copy_to_vnode(struct cte *dest_vnode_cte, cslot_t dest_slot,
                            struct cte *src_cte, uintptr_t flags,
                            uintptr_t offset, uintptr_t pte_count,
                            struct cte *mapping_cte)
{
    assert(type_is_vnode(dest_vnode_cte->cap.type));
    assert(mapping_cte->cap.type == ObjType_Null);

    struct capability *src_cap  = &src_cte->cap;
    struct capability *dest_cap = &dest_vnode_cte->cap;
    mapping_handler_t handler_func = handler[dest_cap->type];

    assert(handler_func != NULL);

#if 0
    genpaddr_t paddr = get_address(&src_cte->cap) + offset;
    genvaddr_t vaddr;
    compile_vaddr(dest_vnode_cte, dest_slot, &vaddr);
    printk(LOG_NOTE, "mapping 0x%"PRIxGENPADDR" to 0x%"PRIxGENVADDR"\n", paddr, vaddr);
#endif

    cslot_t last_slot = dest_slot + pte_count;

    if (last_slot > X86_64_PTABLE_SIZE) {
        // requested map overlaps leaf page table
        debug(SUBSYS_CAPS,
                "caps_copy_to_vnode: requested mapping spans multiple leaf page tables\n");
        return SYS_ERR_VM_RETRY_SINGLE;
    }

    errval_t r = handler_func(dest_cap, dest_slot, src_cap, flags, offset,
                              pte_count, mapping_cte);
    if (err_is_fail(r)) {
        assert(mapping_cte->cap.type == ObjType_Null);
        debug(SUBSYS_PAGING, "caps_copy_to_vnode: handler func returned %ld\n", r);
        return r;
    }

    /* insert mapping cap into mdb */
    errval_t err = mdb_insert(mapping_cte);
    if (err_is_fail(err)) {
        printk(LOG_ERR, "%s: mdb_insert: %"PRIuERRV"\n", __FUNCTION__, err);
    }

    TRACE_CAP_MSG("created", mapping_cte);

    return err;
}

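/**
 * \brief Clear `num_pages` consecutive entries of the leaf page table mapped
 *        at kernel virtual address `pt`, starting at entry `slot`.
 *
 * Returns the number of entries cleared; the caller is responsible for the
 * necessary TLB invalidation.
 */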
size_t do_unmap(lvaddr_t pt, cslot_t slot, size_t num_pages)
{
    // clear the affected entries in this leaf ptable
    size_t unmapped_pages = 0;
    union x86_64_ptable_entry *ptentry = (union x86_64_ptable_entry *)pt + slot;
    for (int i = 0; i < num_pages; i++) {
        ptentry++->raw = 0;
        unmapped_pages++;
    }
    return unmapped_pages;
}

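/// Return the page size mapped by entries of the given x86-64 vnode type
/// (0 for the PML4, whose entries never map pages directly).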
static size_t ptable_type_get_page_size(enum objtype type)
{
    switch(type) {
        case ObjType_VNode_x86_64_ptable:
            return BASE_PAGE_SIZE;
        case ObjType_VNode_x86_64_pdir:
            return LARGE_PAGE_SIZE;
        case ObjType_VNode_x86_64_pdpt:
            return HUGE_PAGE_SIZE;
        case ObjType_VNode_x86_64_pml4:
            return 0;

        default:
            assert(!"Type not x86_64 vnode");
    }
    return 0;
}

/**
 * \brief Modify the flags of `pages` entries in the leaf page table `leaf_pt`.
 *
 * \arg leaf_pt the leaf page table whose entries should be modified
 * \arg offset the offset, in entries, from the start of the page table
 * \arg pages the number of entries to modify
 * \arg flags the new flags
 */
static errval_t generic_modify_flags(struct cte *leaf_pt, size_t offset,
                                     size_t pages,
                                     paging_x86_64_flags_t flags)
{
    lvaddr_t base = local_phys_to_mem(get_address(&leaf_pt->cap)) +
        offset * sizeof(union x86_64_ptable_entry);

    size_t pagesize = BASE_PAGE_SIZE;
    switch(leaf_pt->cap.type) {
        case ObjType_VNode_x86_64_ptable :
            for (int i = 0; i < pages; i++) {
                union x86_64_ptable_entry *entry =
                    (union x86_64_ptable_entry *)base + i;
                if (entry->base.present) {
                    paging_x86_64_modify_flags(entry, flags);
                }
            }
            break;
        case ObjType_VNode_x86_64_pdir :
            for (int i = 0; i < pages; i++) {
                union x86_64_ptable_entry *entry =
                    (union x86_64_ptable_entry *)base + i;
                if (entry->large.present) {
                    paging_x86_64_modify_flags_large(entry, flags);
                }
            }
            pagesize = LARGE_PAGE_SIZE;
            break;
        case ObjType_VNode_x86_64_pdpt :
            for (int i = 0; i < pages; i++) {
                union x86_64_ptable_entry *entry =
                    (union x86_64_ptable_entry *)base + i;
                if (entry->huge.present) {
                    paging_x86_64_modify_flags_huge(entry, flags);
                }
            }
            pagesize = HUGE_PAGE_SIZE;
            break;
        default:
            return SYS_ERR_WRONG_MAPPING;
    }

    return SYS_ERR_OK;
}


/**
 * \brief Modify the flags of the pages in mapping `mapping`.
 *
 * \arg mapping the mapping to modify
 * \arg offset the offset, in page-table entries, from the start of the mapping
 * \arg pages the number of pages to modify
 * \arg mflags the new flags
 * \arg va_hint a user-supplied virtual address for hinting selective TLB
 *              flushing
 */
errval_t page_mappings_modify_flags(struct capability *mapping, size_t offset,
                                    size_t pages, size_t mflags, genvaddr_t va_hint)
{
    assert(type_is_mapping(mapping->type));
    struct Frame_Mapping *info = &mapping->u.frame_mapping;

    /* Calculate page access protection flags */
    // Get frame cap rights
    paging_x86_64_flags_t flags =
        paging_x86_64_cap_to_page_flags(info->cap->rights);
    // Mask with provided access rights mask
    flags = paging_x86_64_mask_attrs(flags, X86_64_PTABLE_ACCESS(mflags));
    // Add additional arch-specific flags
    flags |= X86_64_PTABLE_FLAGS(mflags);
    // Unconditionally mark the page present
    flags |= X86_64_PTABLE_PRESENT;

    // check arguments
    if (offset >= X86_64_PTABLE_SIZE) { // Within pagetable
        return SYS_ERR_VNODE_SLOT_INVALID;
    }
    if (offset + pages > X86_64_PTABLE_SIZE) { // mapping size ok
        return SYS_ERR_VM_MAP_SIZE;
    }

    // get pt cap to figure out page size
    struct cte *leaf_pt = info->ptable;
    if (!type_is_vnode(leaf_pt->cap.type)) {
        return SYS_ERR_VNODE_TYPE;
    }

    errval_t err;
    err = generic_modify_flags(leaf_pt, offset, pages, flags);
    if (err_is_fail(err)) {
        return err;
    }

    size_t pagesize = ptable_type_get_page_size(leaf_pt->cap.type);
    if (va_hint != 0 && va_hint > BASE_PAGE_SIZE) {
        debug(SUBSYS_PAGING,
                "selective flush: 0x%"PRIxGENVADDR"--0x%"PRIxGENVADDR"\n",
                va_hint, va_hint + pages * pagesize);
        // use va_hint as a direct hint
        // invlpg should work for large/huge pages
        for (int i = 0; i < pages; i++) {
            do_one_tlb_flush(va_hint + i * pagesize);
        }
    } else if (va_hint == 1) {
        // XXX: remove this or cleanup interface, -SG, 2015-03-11
        // do computed selective flush
        debug(SUBSYS_PAGING, "computed selective flush\n");
        return paging_tlb_flush_range(cte_for_cap(mapping), offset, pages);
    } else {
        debug(SUBSYS_PAGING, "full flush\n");
        /* do full TLB flush */
        do_full_tlb_flush();
    }

    return SYS_ERR_OK;
}

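/**
 * \brief Modify the flags of `pages` entries of the leaf page table `leaf_pt`,
 *        starting at entry `offset`, then perform a full TLB flush.
 */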
errval_t ptable_modify_flags(struct capability *leaf_pt, size_t offset,
                             size_t pages, size_t mflags)
{
    /* Calculate page access protection flags */
    // Mask with provided access rights mask
    paging_x86_64_flags_t flags = X86_64_PTABLE_USER_SUPERVISOR;
    flags = paging_x86_64_mask_attrs(flags, X86_64_PTABLE_ACCESS(mflags));
    // Add additional arch-specific flags
    flags |= X86_64_PTABLE_FLAGS(mflags);
    // Unconditionally mark the page present
    flags |= X86_64_PTABLE_PRESENT;

    // check arguments
    if (offset >= X86_64_PTABLE_SIZE) { // Within pagetable
        return SYS_ERR_VNODE_SLOT_INVALID;
    }
    if (offset + pages > X86_64_PTABLE_SIZE) { // mapping size ok
        return SYS_ERR_VM_MAP_SIZE;
    }

    errval_t err = generic_modify_flags(cte_for_cap(leaf_pt), offset, pages, flags);

    do_full_tlb_flush();

    return err;
}

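/**
 * \brief Dump the user-space part of the page tables rooted at the given
 *        dispatcher's vspace, printing one line per present mapping.
 */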
void paging_dump_tables(struct dcb *dispatcher)
{
    if (!local_phys_is_valid(dispatcher->vspace)) {
        printk(LOG_ERR, "dispatcher->vspace = 0x%"PRIxLPADDR": too high!\n",
               dispatcher->vspace);
        return;
    }
    lvaddr_t root_pt = local_phys_to_mem(dispatcher->vspace);

    // loop over the user-space PML4 entries (each points to a pdpt)
    union x86_64_ptable_entry *pt;
    size_t kernel_pml4e = X86_64_PML4_BASE(MEMORY_OFFSET);
    for (int pdpt_index = 0; pdpt_index < kernel_pml4e; pdpt_index++) {
        union x86_64_pdir_entry *pdpt = (union x86_64_pdir_entry *)root_pt + pdpt_index;
        if (!pdpt->d.present) { continue; }
        genpaddr_t pdpt_gp = (genpaddr_t)pdpt->d.base_addr << BASE_PAGE_BITS;
        lvaddr_t pdpt_lv = local_phys_to_mem(gen_phys_to_local_phys(pdpt_gp));

        for (int pdir_index = 0; pdir_index < X86_64_PTABLE_SIZE; pdir_index++) {
            // get pdir
            union x86_64_pdir_entry *pdir = (union x86_64_pdir_entry *)pdpt_lv + pdir_index;
            pt = (union x86_64_ptable_entry*)pdir;
            if (!pdir->d.present) { continue; }
            // check if pdir or huge page
            if (pt->huge.always1) {
                // is huge page mapping
                genpaddr_t paddr = (genpaddr_t)pt->huge.base_addr << HUGE_PAGE_BITS;
                printf("%d.%d: 0x%"PRIxGENPADDR"\n", pdpt_index, pdir_index, paddr);
                // goto next pdpt entry
                continue;
            }
            genpaddr_t pdir_gp = (genpaddr_t)pdir->d.base_addr << BASE_PAGE_BITS;
            lvaddr_t pdir_lv = local_phys_to_mem(gen_phys_to_local_phys(pdir_gp));

            for (int ptable_index = 0; ptable_index < X86_64_PTABLE_SIZE; ptable_index++) {
                // get ptable
                union x86_64_pdir_entry *ptable = (union x86_64_pdir_entry *)pdir_lv + ptable_index;
                pt = (union x86_64_ptable_entry *)ptable;
                if (!ptable->d.present) { continue; }
                // check if ptable or large page
                if (pt->large.always1) {
                    // is large page mapping
                    genpaddr_t paddr = (genpaddr_t)pt->large.base_addr << LARGE_PAGE_BITS;
                    printf("%d.%d.%d: 0x%"PRIxGENPADDR"\n", pdpt_index, pdir_index, ptable_index, paddr);
                    // goto next pdir entry
                    continue;
                }
                genpaddr_t ptable_gp = (genpaddr_t)ptable->d.base_addr << BASE_PAGE_BITS;
                lvaddr_t ptable_lv = local_phys_to_mem(gen_phys_to_local_phys(ptable_gp));

                for (int entry = 0; entry < X86_64_PTABLE_SIZE; entry++) {
                    union x86_64_ptable_entry *e =
                        (union x86_64_ptable_entry *)ptable_lv + entry;
                    if (!e->base.present) { continue; }
                    genpaddr_t paddr = (genpaddr_t)e->base.base_addr << BASE_PAGE_BITS;
                    printf("%d.%d.%d.%d: 0x%"PRIxGENPADDR" (raw=0x%016"PRIx64")\n",
                            pdpt_index, pdir_index, ptable_index, entry, paddr, e->raw);
                }
            }
        }
    }
}