1/**
2 * \file
 * \brief Kernel-level page-table mapping operations for x86-64, including
 *        EPT and VT-d table handling.
4 */
5
6/*
7 * Copyright (c) 2010-2013, 2016 ETH Zurich.
8 * Copyright (c) 2014, HP Labs.
9 * All rights reserved.
10 *
11 * This file is distributed under the terms in the attached LICENSE file.
12 * If you do not find this file, copies can be found by writing to:
13 * ETH Zurich D-INFK, Universitaetstrasse 6, CH-8092 Zurich. Attn: Systems Group.
14 */
15
16#include <kernel.h>
17#include <dispatch.h>
18#include <target/x86_64/paging_kernel_target.h>
19#include <target/x86_64/offsets_target.h>
20#include <paging_kernel_arch.h>
21#include <mdb/mdb_tree.h>
22#include <string.h>
23#include <barrelfish_kpi/init.h>
24#include <cap_predicates.h>
25#include <paging_generic.h>
26#include <useraccess.h>
27
28#ifdef __k1om__
29#include <target/k1om/offsets_target.h>
30#define MEMORY_OFFSET K1OM_MEMORY_OFFSET
31#else
33#define MEMORY_OFFSET X86_64_MEMORY_OFFSET
34#endif
35
/// Map within an x86_64 non-leaf ptable (PML4/PDPT/PDIR)
37static errval_t x86_64_non_ptable(struct capability *dest, cslot_t slot,
38                                  struct capability *src, uintptr_t flags,
39                                  uintptr_t offset, size_t pte_count,
40                                  struct cte *mapping_cte)
41{
    if (slot >= X86_64_PTABLE_SIZE) { // slot must be within the page table
44        return SYS_ERR_VNODE_SLOT_INVALID;
45    }
46
47    if (type_is_vnode(src->type) && pte_count != 1) { // only allow single ptable mappings
48        debug(SUBSYS_PAGING, "src type and count mismatch\n");
49        return SYS_ERR_VM_MAP_SIZE;
50    }
51
    if (slot + pte_count > X86_64_PTABLE_SIZE) { // mapping must not overrun the page table
53        debug(SUBSYS_PAGING, "mapping size invalid (%zd)\n", pte_count);
54        return SYS_ERR_VM_MAP_SIZE;
55    }
56
57    size_t page_size = 0;
58    paging_x86_64_flags_t flags_large = 0;
59    bool is_ept = false;
60    switch (dest->type) {
61        case ObjType_VNode_x86_64_pml4:
62            if (src->type != ObjType_VNode_x86_64_pdpt) { // Right mapping
63                debug(SUBSYS_PAGING, "src type invalid: %d\n", src->type);
64                return SYS_ERR_WRONG_MAPPING;
65            }
66            if(slot >= X86_64_PML4_BASE(MEMORY_OFFSET)) { // Kernel mapped here
67                return SYS_ERR_VNODE_SLOT_RESERVED;
68            }
69            break;
70        case ObjType_VNode_x86_64_ept_pml4:
71            is_ept = true;
72            if (src->type != ObjType_VNode_x86_64_ept_pdpt) { // Right mapping
73                printf("src type invalid\n");
74                return SYS_ERR_WRONG_MAPPING;
75            }
76            if(slot >= X86_64_PML4_BASE(MEMORY_OFFSET)) { // Kernel mapped here
77                return SYS_ERR_VNODE_SLOT_RESERVED;
78            }
79            break;
80        case ObjType_VNode_x86_64_pdpt:
81            // huge page support
82            if (src->type != ObjType_VNode_x86_64_pdir) { // Right mapping
83                if (src->type != ObjType_Frame &&
84                    src->type != ObjType_DevFrame &&
85                    src->type != ObjType_EndPointUMP) { // Right mapping
86                    debug(SUBSYS_PAGING, "src type invalid: %d\n", src->type);
87                    return SYS_ERR_WRONG_MAPPING;
88                }
89
90                if (get_size(src) < HUGE_PAGE_SIZE) {
91                    return SYS_ERR_VM_FRAME_TOO_SMALL;
92                }
93
94                if ((get_address(src)+offset) & HUGE_PAGE_MASK) {
95                    return SYS_ERR_VM_FRAME_UNALIGNED;
96                }
97
98                // TODO: check if the system allows 1GB mappings
99                page_size = X86_64_HUGE_PAGE_SIZE;
100                // check offset within frame
101                genpaddr_t off = offset;
102
103                if (off + pte_count * X86_64_HUGE_PAGE_SIZE > get_size(src)) {
                    printk(LOG_NOTE, "frame offset invalid: %zx > 0x%"PRIxGENSIZE"\n",
                            off + pte_count * X86_64_HUGE_PAGE_SIZE, get_size(src));
106                    return SYS_ERR_FRAME_OFFSET_INVALID;
107                }
108                // Calculate page access protection flags /
109                // Get frame cap rights
110                flags_large = paging_x86_64_cap_to_page_flags(src->rights);
111                // Mask with provided access rights mask
                flags_large = paging_x86_64_mask_attrs(flags_large, X86_64_PTABLE_ACCESS(flags));
113                // Add additional arch-specific flags
114                flags_large |= X86_64_PTABLE_FLAGS(flags);
115                // Unconditionally mark the page present
116                flags_large |= X86_64_PTABLE_PRESENT;
117            }
118            break;
119        case ObjType_VNode_x86_64_ept_pdpt:
120            is_ept = true;
121            // huge page support
122            if (src->type != ObjType_VNode_x86_64_ept_pdir) { // Right mapping
123                // TODO: check if the system allows 1GB mappings
124                page_size = X86_64_HUGE_PAGE_SIZE;
125                // check offset within frame
126                genpaddr_t off = offset;
127
128                if (off + pte_count * X86_64_HUGE_PAGE_SIZE > get_size(src)) {
129                    return SYS_ERR_FRAME_OFFSET_INVALID;
130                }
131                // Calculate page access protection flags /
132                // Get frame cap rights
133                flags_large = paging_x86_64_cap_to_page_flags(src->rights);
134                // Mask with provided access rights mask
                flags_large = paging_x86_64_mask_attrs(flags_large, X86_64_PTABLE_ACCESS(flags));
136                // Add additional arch-specific flags
137                flags_large |= X86_64_PTABLE_FLAGS(flags);
138                // Unconditionally mark the page present
139                flags_large |= X86_64_PTABLE_PRESENT;
140            }
141            break;
142        case ObjType_VNode_x86_64_pdir:
143            // superpage support
144            if (src->type != ObjType_VNode_x86_64_ptable) { // Right mapping
145                if (src->type != ObjType_Frame &&
146                    src->type != ObjType_DevFrame &&
147                    src->type != ObjType_EndPointUMP) { // Right mapping
148                    debug(SUBSYS_PAGING, "src type invalid: %d\n", src->type);
149                    return SYS_ERR_WRONG_MAPPING;
150                }
151
152                if (get_size(src) < LARGE_PAGE_SIZE) {
153                    return SYS_ERR_VM_FRAME_TOO_SMALL;
154                }
155
156                if ((get_address(src)+offset) & LARGE_PAGE_MASK) {
157                    return SYS_ERR_VM_FRAME_UNALIGNED;
158                }
159
160                page_size = X86_64_LARGE_PAGE_SIZE;
161
162                // check offset within frame
163                genpaddr_t off = offset;
164
165                if (off + pte_count * X86_64_LARGE_PAGE_SIZE > get_size(src)) {
                    printk(LOG_NOTE, "frame offset invalid: %zx > 0x%"PRIxGENSIZE"\n",
                            off + pte_count * X86_64_LARGE_PAGE_SIZE, get_size(src));
168                    return SYS_ERR_FRAME_OFFSET_INVALID;
169                }
170                // Calculate page access protection flags /
171                // Get frame cap rights
172                flags_large = paging_x86_64_cap_to_page_flags(src->rights);
173                // Mask with provided access rights mask
                flags_large = paging_x86_64_mask_attrs(flags_large, X86_64_PTABLE_ACCESS(flags));
175                // Add additional arch-specific flags
176                flags_large |= X86_64_PTABLE_FLAGS(flags);
177                // Unconditionally mark the page present
178                flags_large |= X86_64_PTABLE_PRESENT;
179
180            }
181            break;
182        case ObjType_VNode_x86_64_ept_pdir:
183            is_ept = true;
184            // superpage support
185            if (src->type != ObjType_VNode_x86_64_ept_ptable) { // Right mapping
186                page_size = X86_64_LARGE_PAGE_SIZE;
187
188                // check offset within frame
189                genpaddr_t off = offset;
190
191                if (off + pte_count * X86_64_LARGE_PAGE_SIZE > get_size(src)) {
192                    return SYS_ERR_FRAME_OFFSET_INVALID;
193                }
                // Calculate page access protection flags:
                // mask the provided attributes with the access rights mask
                // (no frame cap rights are consulted on this EPT path)
197                flags_large = paging_x86_64_mask_attrs(flags, X86_64_PTABLE_ACCESS(flags));
198                // Add additional arch-specific flags
199                flags_large |= X86_64_PTABLE_FLAGS(flags);
200                // Unconditionally mark the page present
201                flags_large |= X86_64_PTABLE_PRESENT;
202            }
203            break;
204        default:
205            debug(SUBSYS_PAGING, "dest type invalid\n");
206            return SYS_ERR_DEST_TYPE_INVALID;
207    }
208
209    // Convert destination base address
210    genpaddr_t dest_gp   = get_address(dest);
211    lpaddr_t dest_lp     = gen_phys_to_local_phys(dest_gp);
212    lvaddr_t dest_lv     = local_phys_to_mem(dest_lp);
213    // Convert source base address
214    genpaddr_t src_gp   = get_address(src);
215    lpaddr_t src_lp     = gen_phys_to_local_phys(src_gp);
216
217    // set metadata
218    create_mapping_cap(mapping_cte, src, cte_for_cap(dest),
219                       slot, pte_count);
220
    // XXX: we currently use the standard paging structs even when mapping
    // EPT entries; the EPT-specific map calls below are disabled.
    is_ept = false;
223
224    bool need_tlb_flush = false;
225    cslot_t last_slot = slot + pte_count;
226    for (; slot < last_slot; slot++, offset += page_size) {
227        // Destination
228        union x86_64_pdir_entry *entry = (union x86_64_pdir_entry *)dest_lv + slot;
229
230        if (X86_64_IS_PRESENT(entry)) {
231            // cleanup mapping info
232            // TODO: cleanup already mapped pages
233            //memset(&src_cte->mapping_info, 0, sizeof(struct mapping_info));
234            //printf("slot in use\n");
235            //return SYS_ERR_VNODE_SLOT_INUSE;
236            need_tlb_flush = true;
237        }
238
        // determine if we map a large/huge page or a normal entry
        if (page_size == X86_64_LARGE_PAGE_SIZE) {
            // a large (2MB) page is mapped
#if 0       // EPT variant, currently disabled (see is_ept above)
            if (is_ept) {
                paging_x86_64_ept_map_large((union x86_64_ept_ptable_entry *)entry,
                        src_lp + offset, flags_large);
            } else
#endif
            paging_x86_64_map_large((union x86_64_ptable_entry *)entry,
                    src_lp + offset, flags_large);
        } else if (page_size == X86_64_HUGE_PAGE_SIZE) {
            // a huge (1GB) page is mapped
#if 0       // EPT variant, currently disabled
            if (is_ept) {
                paging_x86_64_ept_map_huge((union x86_64_ept_ptable_entry *)entry,
                        src_lp + offset, flags_large);
            } else
#endif
            paging_x86_64_map_huge((union x86_64_ptable_entry *)entry,
                    src_lp + offset, flags_large);
        } else {
            // a normal paging-structure entry is mapped
#if 0       // EPT variant, currently disabled
            if (is_ept) {
                paging_x86_64_ept_map_table(
                        (union x86_64_ept_pdir_entry *)entry, src_lp + offset);
            } else
#endif
            paging_x86_64_map_table(entry, src_lp + offset);
        }
268    }
269    if (need_tlb_flush) {
270        //TODO: do hint-based selective flush for single page mapping
271        do_full_tlb_flush();
272    }
273
274    return SYS_ERR_OK;
275}
276
/// Map within an x86_64 leaf ptable
278static errval_t x86_64_ptable(struct capability *dest, cslot_t slot,
279                              struct capability *src, uintptr_t mflags,
280                              uintptr_t offset, size_t pte_count,
281                              struct cte *mapping_cte)
282{
    if (slot >= X86_64_PTABLE_SIZE) { // slot must be within the page table
285        debug(SUBSYS_PAGING, "    vnode_invalid\n");
286        return SYS_ERR_VNODE_SLOT_INVALID;
287    }
288
    if (slot + pte_count > X86_64_PTABLE_SIZE) { // mapping must not overrun the page table
290        debug(SUBSYS_PAGING, "mapping size invalid (%zd)\n", pte_count);
291        return SYS_ERR_VM_MAP_SIZE;
292    }
293
    if (src->type != ObjType_Frame &&
        src->type != ObjType_DevFrame &&
        !type_is_vnode(src->type) &&
        src->type != ObjType_EndPointUMP) { // allowed source types only
299        debug(SUBSYS_PAGING, "src type invalid\n");
300        return SYS_ERR_WRONG_MAPPING;
301    }
302
303    // check offset within frame
304    genpaddr_t off = offset;
305    if (off + pte_count * X86_64_BASE_PAGE_SIZE > get_size(src)) {
306        debug(SUBSYS_PAGING, "frame offset invalid: %zx > 0x%"PRIxGENSIZE"\n",
307                off + pte_count * X86_64_BASE_PAGE_SIZE, get_size(src));
308        printk(LOG_NOTE, "frame offset invalid: %zx > 0x%"PRIxGENSIZE"\n",
309                off + pte_count * X86_64_BASE_PAGE_SIZE, get_size(src));
310        char buf[256];
311        sprint_cap(buf,256,src);
312        printk(LOG_NOTE, "src = %s\n", buf);
313        return SYS_ERR_FRAME_OFFSET_INVALID;
314    }
315
316
317    /* Calculate page access protection flags */
318    // Get frame cap rights
319    paging_x86_64_flags_t flags;
320    if (src->type != ObjType_RAM) {
321        flags = paging_x86_64_cap_to_page_flags(src->rights);
322    } else {
323        flags = X86_64_PTABLE_READ_WRITE | X86_64_PTABLE_USER_SUPERVISOR;
324    }
325    // Mask with provided access rights mask
326    flags = paging_x86_64_mask_attrs(flags, X86_64_PTABLE_ACCESS(mflags));
327    // Add additional arch-specific flags
328    flags |= X86_64_PTABLE_FLAGS(mflags);
329    // Unconditionally mark the page present
330    flags |= X86_64_PTABLE_PRESENT;
    // 1. Make sure non-guest page tables are never writeable from user space
    // 2. ptables mapped in EPT tables need to be writeable
333    if (type_is_vnode(src->type) &&
334        !type_is_ept(dest->type) &&
335        !dcb_current->is_vm_guest)
336    {
337        if (flags & X86_64_PTABLE_READ_WRITE) {
338            return SYS_ERR_VM_MAP_RIGHTS;
339        }
340    }
341
342
343    // Convert destination base address
344    genpaddr_t dest_gp   = get_address(dest);
345    lpaddr_t dest_lp     = gen_phys_to_local_phys(dest_gp);
346    lvaddr_t dest_lv     = local_phys_to_mem(dest_lp);
347    // Convert source base address
348    genpaddr_t src_gp   = get_address(src);
349    lpaddr_t src_lp     = gen_phys_to_local_phys(src_gp);
350    // Set metadata
351    create_mapping_cap(mapping_cte, src, cte_for_cap(dest),
352                       slot, pte_count);
353
354    bool need_tlb_flush = false;
355    cslot_t last_slot = slot + pte_count;
356    for (; slot < last_slot; slot++, offset += X86_64_BASE_PAGE_SIZE) {
357        union x86_64_ptable_entry *entry =
358            (union x86_64_ptable_entry *)dest_lv + slot;
359
360        if (X86_64_IS_PRESENT(entry)) {
361            // TODO: cleanup already mapped pages
362            //memset(&src_cte->mapping_info, 0, sizeof(struct mapping_info));
363            //debug(LOG_WARN, "Trying to remap an already-present page is NYI, but "
364            //      "this is most likely a user-space bug!\n");
365            //return SYS_ERR_VNODE_SLOT_INUSE;
366            need_tlb_flush = true;
367        }
368
369        // Carry out the page mapping
370#if 0
371        if (type_is_ept(dest->type)) {
372            paging_x86_64_ept_map((union x86_64_ept_ptable_entry *)entry,
373                    src_lp + offset, flags);
374        } else {
375            paging_x86_64_map(entry, src_lp + offset, flags);
376        }
377#endif
378        paging_x86_64_map(entry, src_lp + offset, flags);
379    }
380    if (need_tlb_flush) {
381        // TODO: do hint-based selective tlb flush if single page modified
382        do_full_tlb_flush();
383    }
384
385    return SYS_ERR_OK;
386}
387
388#include <dev/vtd_dev.h>
389
/// Map within a VT-d root or context table
391static errval_t x86_64_vtd_table(struct capability *dest, cslot_t slot,
392                                  struct capability *src, uintptr_t flags,
393                                  uintptr_t offset, size_t pte_count,
394                                  struct cte *mapping_cte)
395{
396    printf("page_mappings_arch:x86_64_vtd_table\n");
397    if (slot >= 256) { // Within pagetable
398        return SYS_ERR_VNODE_SLOT_INVALID;
399    }
400
401    if (pte_count != 1) {
        // only allow single-entry mappings into VT-d tables
        debug(SUBSYS_PAGING, "invalid mapping count: %zu\n", pte_count);
404        return SYS_ERR_VM_MAP_SIZE;
405    }
406
407    vtd_addr_width_t agaw = 0;
408    switch (dest->type) {
409        case ObjType_VNode_VTd_root_table :
410            if (src->type != ObjType_VNode_VTd_ctxt_table) {
411                debug(SUBSYS_PAGING, "src type invalid: %d\n", src->type);
412                return SYS_ERR_WRONG_MAPPING;
413            }
414
415            break;
416        case ObjType_VNode_VTd_ctxt_table :
417            switch(src->type) {
418                case ObjType_VNode_x86_64_pml4:
419                    agaw = vtd_agaw48;
420                    break;
421                case ObjType_VNode_x86_64_pml5:
422                    agaw = vtd_agaw57;
423                    break;
424                case ObjType_VNode_x86_64_pdpt:
425                    agaw = vtd_agaw39;
426                    break;
427                default:
428                    debug(SUBSYS_PAGING, "src type invalid: %d\n", src->type);
429                    return SYS_ERR_WRONG_MAPPING;
430            }
431            break;
432        default:
433            debug(SUBSYS_PAGING, "dest type invalid\n");
434            return SYS_ERR_DEST_TYPE_INVALID;
435    }
436
437    // Convert destination base address
438    genpaddr_t dest_gp   = get_address(dest);
439    lpaddr_t dest_lp     = gen_phys_to_local_phys(dest_gp);
440    lvaddr_t dest_lv     = local_phys_to_mem(dest_lp);
441    // Convert source base address
442    genpaddr_t src_gp   = get_address(src);
443    lpaddr_t src_lp     = gen_phys_to_local_phys(src_gp);
444
445    switch (dest->type) {
446        case ObjType_VNode_VTd_root_table : {
447            vtd_root_entry_t rt = (vtd_root_entry_t) (dest_lv + slot *
448                                                                vtd_root_entry_size);
449
450            if (vtd_root_entry_p_extract(rt)) {
451                return SYS_ERR_VNODE_SLOT_INUSE;
452            }
453            vtd_root_entry_ctp_insert(rt, src_lp >> BASE_PAGE_BITS);
454            vtd_root_entry_p_insert(rt, 1);
455
456            break;
457        }
458        case ObjType_VNode_VTd_ctxt_table : {
459
460            vtd_ctxt_entry_t ct = (vtd_ctxt_entry_t)(
461                    dest_lv + slot * vtd_ctxt_entry_size);
462            if (vtd_ctxt_entry_p_extract(ct)) {
463                return SYS_ERR_VNODE_SLOT_INUSE;
464            }
465
466            uint16_t domid = (flags >> 8) & 0xffff;
467
            /* Device-TLBs are not supported: translate via second-level
             * paging structures (host mode, vtd_hmd) */
            vtd_ctxt_entry_t_insert(ct, vtd_hmd);
            vtd_ctxt_entry_aw_insert(ct, agaw);
            vtd_ctxt_entry_did_insert(ct, domid);

#if 0
            /* Translation-type (TT) encodings, from the VT-d specification:
             * 00b: Untranslated requests are translated using second-level
             *      paging structures referenced through the SLPTPTR field.
             *      Translated requests and Translation Requests are blocked.
             * 01b: Untranslated, Translated and Translation Requests are
             *      supported. This encoding is treated as reserved by hardware
             *      implementations not supporting Device-TLBs (DT=0 in the
             *      Extended Capability Register).
             * 10b: Untranslated requests are processed as pass-through; the
             *      SLPTPTR field is ignored by hardware. Translated and
             *      Translation Requests are blocked. This encoding is treated
             *      as reserved by hardware implementations not supporting
             *      pass-through.
             */
            #define vtd_hmd ((vtd_translation_type_t)0x0)
            #define vtd_hme ((vtd_translation_type_t)0x1)
            #define vtd_ptm ((vtd_translation_type_t)0x2)
#endif
493
494            /* flush the cache */
495            wbinvd();
496
497            vtd_ctxt_entry_slptptr_insert(ct, (src_lp >> BASE_PAGE_BITS));
498            vtd_ctxt_entry_p_insert(ct, 1);
499
500            break;
501        }
502        default:
503            debug(SUBSYS_PAGING, "dest type invalid\n");
504            return SYS_ERR_DEST_TYPE_INVALID;
505    }
506
507    // set metadata
508    create_mapping_cap(mapping_cte, src, cte_for_cap(dest),
509                       slot, pte_count);
510
511
512    return SYS_ERR_OK;
513}
514
515
516typedef errval_t (*mapping_handler_t)(struct capability *dest_cap,
517                                      cslot_t dest_slot,
518                                      struct capability *src_cap,
519                                      uintptr_t flags, uintptr_t offset,
520                                      size_t pte_count,
521                                      struct cte *mapping_cte);
522
523/// Dispatcher table for the type of mapping to create
524static mapping_handler_t handler[ObjType_Num] = {
525    [ObjType_VNode_VTd_root_table]  = x86_64_vtd_table,
526    [ObjType_VNode_VTd_ctxt_table]  = x86_64_vtd_table,
527    [ObjType_VNode_x86_64_pml5]     = x86_64_non_ptable,
528    [ObjType_VNode_x86_64_pml4]     = x86_64_non_ptable,
529    [ObjType_VNode_x86_64_pdpt]     = x86_64_non_ptable,
530    [ObjType_VNode_x86_64_pdir]     = x86_64_non_ptable,
531    [ObjType_VNode_x86_64_ptable]   = x86_64_ptable,
532    [ObjType_VNode_x86_64_ept_pml4]   = x86_64_non_ptable,
533    [ObjType_VNode_x86_64_ept_pdpt]   = x86_64_non_ptable,
534    [ObjType_VNode_x86_64_ept_pdir]   = x86_64_non_ptable,
535    [ObjType_VNode_x86_64_ept_ptable] = x86_64_ptable,
536};
537
538
539/// Create page mappings
540errval_t caps_copy_to_vnode(struct cte *dest_vnode_cte, cslot_t dest_slot,
541                            struct cte *src_cte, uintptr_t flags,
542                            uintptr_t offset, uintptr_t pte_count,
543                            struct cte *mapping_cte)
544{
545    assert(type_is_vnode(dest_vnode_cte->cap.type));
546    assert(mapping_cte->cap.type == ObjType_Null);
547
548    struct capability *src_cap  = &src_cte->cap;
549    struct capability *dest_cap = &dest_vnode_cte->cap;
550    mapping_handler_t handler_func = handler[dest_cap->type];
551
552    assert(handler_func != NULL);
553
554#if 0
555    genpaddr_t paddr = get_address(&src_cte->cap) + offset;
556    genvaddr_t vaddr;
557    compile_vaddr(dest_vnode_cte, dest_slot, &vaddr);
558    printk(LOG_NOTE, "mapping 0x%"PRIxGENPADDR" to 0x%"PRIxGENVADDR"\n", paddr, vaddr);
559#endif
560
561    cslot_t last_slot = dest_slot + pte_count;
562
563    if (last_slot > X86_64_PTABLE_SIZE) {
564        // requested map overlaps leaf page table
565        debug(SUBSYS_CAPS,
566                "caps_copy_to_vnode: requested mapping spans multiple leaf page tables\n");
567        debug(SUBSYS_PAGING,
568                "first_slot = %"PRIuCSLOT", last_slot = %"PRIuCSLOT", count = %zu\n",
569                dest_slot, last_slot, pte_count);
570        return SYS_ERR_VM_RETRY_SINGLE;
571    }
572
573    errval_t r = handler_func(dest_cap, dest_slot, src_cap, flags, offset,
574                              pte_count, mapping_cte);
575    if (err_is_fail(r)) {
576        assert(mapping_cte->cap.type == ObjType_Null);
577        debug(SUBSYS_PAGING, "caps_copy_to_vnode: handler func returned %ld\n", r);
578        return r;
579    }
580
581    /* insert mapping cap into mdb */
582    assert(type_is_mapping(mapping_cte->cap.type));
583    errval_t err = mdb_insert(mapping_cte);
584    if (err_is_fail(err)) {
585        printk(LOG_ERR, "%s: mdb_insert: %"PRIuERRV"\n", __FUNCTION__, err);
586    }
587
588    TRACE_CAP_MSG("created", mapping_cte);
589
590    return err;
591}
592
593static inline void read_pt_entry(struct capability *pgtable, size_t slot,
594                                 genpaddr_t *mapped_addr, lpaddr_t *pte,
595                                 void **entry)
596{
597    assert(type_is_vnode(pgtable->type));
598
599    genpaddr_t paddr = 0;
    lpaddr_t pte_ = 0;
    void *entry_ = NULL;
602
603    genpaddr_t gp = get_address(pgtable);
604    lpaddr_t lp = gen_phys_to_local_phys(gp);
605    lvaddr_t lv = local_phys_to_mem(lp);
606
607    // get paddr
608    union x86_64_ptable_entry *e = NULL;
609    bool huge = false;
610    switch (pgtable->type) {
    case ObjType_VNode_x86_64_pdpt:
        huge = true;
        // fall through
613    case ObjType_VNode_x86_64_pml4:
614    case ObjType_VNode_x86_64_pdir: {
615        e = (union x86_64_ptable_entry *)lv + slot;
616        if (e->large.always1) {
617            if (huge) {
618                paddr = (lpaddr_t)e->huge.base_addr << HUGE_PAGE_BITS;
619            } else {
620                paddr = (lpaddr_t)e->large.base_addr << LARGE_PAGE_BITS;
621            }
622        } else {
623            union x86_64_pdir_entry *de =
624                (union x86_64_pdir_entry *)lv + slot;
625            paddr = (lpaddr_t)de->d.base_addr << BASE_PAGE_BITS;
626            entry_ = de;
627            pte_ = lp + slot * sizeof(union x86_64_pdir_entry);
628        }
629        break;
630    }
631    case ObjType_VNode_x86_64_ptable: {
632        e = (union x86_64_ptable_entry *)lv + slot;
633        paddr = (lpaddr_t)e->base.base_addr << BASE_PAGE_BITS;
634        entry_ = e;
635        pte_ = lp + slot * sizeof(union x86_64_ptable_entry);
636        break;
637    }
638    default:
639        assert(!"Should not get here");
640    }
641
642    if (mapped_addr) {
643        *mapped_addr = paddr;
644    }
645    if (pte) {
646        *pte = pte_;
647    }
648    if (entry) {
649        *entry = entry_;
650    }
651
663}
664
665errval_t paging_copy_remap(struct cte *dest_vnode_cte, cslot_t dest_slot,
666                           struct cte *src_cte, uintptr_t flags,
667                           uintptr_t offset, uintptr_t pte_count,
668                           struct cte *mapping_cte)
669{
670    assert(type_is_vnode(dest_vnode_cte->cap.type));
671    assert(mapping_cte->cap.type == ObjType_Null);
672
673    struct capability *src_cap  = &src_cte->cap;
674    struct capability *dest_cap = &dest_vnode_cte->cap;
675    mapping_handler_t handler_func = handler[dest_cap->type];
676
677    assert(handler_func != NULL);
678
679    cslot_t last_slot = dest_slot + pte_count;
680
681    if (last_slot > X86_64_PTABLE_SIZE) {
682        // requested map overlaps leaf page table
683        printf("caps_copy_to_vnode: requested mapping spans multiple leaf page tables\n");
684        return SYS_ERR_VM_RETRY_SINGLE;
685    }
686
687    size_t page_size = BASE_PAGE_SIZE;
688    switch(dest_cap->type) {
689        case ObjType_VNode_x86_64_ptable:
690            page_size = BASE_PAGE_SIZE;
691            break;
692        case ObjType_VNode_x86_64_pdir:
693            page_size = LARGE_PAGE_SIZE;
694            break;
695        case ObjType_VNode_x86_64_pdpt:
696            page_size = HUGE_PAGE_SIZE;
697            break;
698        default:
699            printf("%s: unknown dest VNode: %d\n", __FUNCTION__, dest_cap->type);
700            break;
701    }
702
703    errval_t err;
704    // clone existing pages
705    lvaddr_t toaddr;
706    toaddr = local_phys_to_mem(gen_phys_to_local_phys(get_address(src_cap))) + offset;
707    genpaddr_t gpfromaddr = 0;
708    lvaddr_t fromaddr = 0;
709
710    read_pt_entry(dest_cap, dest_slot, &gpfromaddr, NULL, NULL);
711    if (!gpfromaddr) {
712        printf("%s: dest slot empty\n", __FUNCTION__);
713        return SYS_ERR_VNODE_NOT_INSTALLED;
714    }
715    fromaddr = local_phys_to_mem(gen_phys_to_local_phys(gpfromaddr));
716    memcpy((void*)toaddr, (void*)fromaddr, pte_count*page_size);
717
718    err = handler_func(dest_cap, dest_slot, src_cap, flags, offset, pte_count,
719                       mapping_cte);
720    if (err_is_fail(err)) {
721        printf("%s: handler func returned %ld\n", __FUNCTION__, err);
722        if (err_no(err) == SYS_ERR_WRONG_MAPPING) {
723            printk(LOG_NOTE, "dest->type = %d, src->type = %d\n",
724                    dest_cap->type, src_cap->type);
725        }
726        memset(mapping_cte, 0, sizeof(*mapping_cte));
727        return err;
728    }
729
730    /* insert mapping cap into mdb */
731    assert(type_is_mapping(mapping_cte->cap.type));
732    err = mdb_insert(mapping_cte);
733    if (err_is_fail(err)) {
734        printk(LOG_ERR, "%s: mdb_insert: %"PRIuERRV"\n", __FUNCTION__, err);
735        return err;
736    }
737
738    return err;
739}
740
741__attribute__((unused))
742static inline lvaddr_t get_leaf_ptable_for_vaddr(genvaddr_t vaddr)
743{
744    lvaddr_t root_pt = local_phys_to_mem(dcb_current->vspace);
745
746    // get pdpt
747    union x86_64_pdir_entry *pdpt = (union x86_64_pdir_entry *)root_pt + X86_64_PML4_BASE(vaddr);
748    if (!pdpt->raw) { return 0; }
749    genpaddr_t pdpt_gp = pdpt->d.base_addr << BASE_PAGE_BITS;
750    lvaddr_t pdpt_lv = local_phys_to_mem(gen_phys_to_local_phys(pdpt_gp));
751    // get pdir
752    union x86_64_pdir_entry *pdir = (union x86_64_pdir_entry *)pdpt_lv + X86_64_PDPT_BASE(vaddr);
753    if (!pdir->raw) { return 0; }
754    genpaddr_t pdir_gp = pdir->d.base_addr << BASE_PAGE_BITS;
755    lvaddr_t pdir_lv = local_phys_to_mem(gen_phys_to_local_phys(pdir_gp));
756    // get ptable
757    union x86_64_ptable_entry *ptable = (union x86_64_ptable_entry *)pdir_lv + X86_64_PDIR_BASE(vaddr);
758    if (!ptable->raw) { return 0; }
759    genpaddr_t ptable_gp = ptable->base.base_addr << BASE_PAGE_BITS;
760    lvaddr_t ptable_lv = local_phys_to_mem(gen_phys_to_local_phys(ptable_gp));
761
762    return ptable_lv;
763}
764
765size_t do_unmap(lvaddr_t pt, cslot_t slot, size_t num_pages)
766{
    // clear the affected entries in this leaf ptable
768    size_t unmapped_pages = 0;
769    union x86_64_ptable_entry *ptentry = (union x86_64_ptable_entry *)pt + slot;
    for (size_t i = 0; i < num_pages; i++) {
771        ptentry++->raw = 0;
772        unmapped_pages++;
773    }
774    return unmapped_pages;
775}
776
777static size_t ptable_type_get_page_size(enum objtype type)
778{
779    switch(type) {
780        case ObjType_VNode_x86_64_ptable:
781            return BASE_PAGE_SIZE;
782        case ObjType_VNode_x86_64_pdir:
783            return LARGE_PAGE_SIZE;
784        case ObjType_VNode_x86_64_pdpt:
785            return HUGE_PAGE_SIZE;
786        case ObjType_VNode_x86_64_pml4:
787            return 0;
788
789        default:
790            assert(!"Type not x86_64 vnode");
791    }
792    return 0;
793}
794
795/**
796 * \brief modify flags of entries in `leaf_pt`.
797 *
798 * \arg leaf_pt the frame whose mapping should be modified
799 * \arg offset the offset from the first page table entry in entries
800 * \arg pages the number of pages to modify
801 * \arg flags the new flags
802 * \arg va_hint a user-supplied virtual address for hinting selective TLB
803 *              flushing
804 */
805static errval_t generic_modify_flags(struct cte *leaf_pt, size_t offset,
806                                     size_t pages,
807                                     paging_x86_64_flags_t flags)
808{
809    lvaddr_t base = local_phys_to_mem(get_address(&leaf_pt->cap)) +
810        offset * sizeof(union x86_64_ptable_entry);
811
812    switch(leaf_pt->cap.type) {
813        case ObjType_VNode_x86_64_ptable :
814            for (int i = 0; i < pages; i++) {
815                union x86_64_ptable_entry *entry =
816                    (union x86_64_ptable_entry *)base + i;
817                if (entry->base.present) {
818                    paging_x86_64_modify_flags(entry, flags);
819                }
820            }
821            break;
822        case ObjType_VNode_x86_64_pdir :
823            for (int i = 0; i < pages; i++) {
824                union x86_64_ptable_entry *entry =
825                    (union x86_64_ptable_entry *)base + i;
826                if (entry->large.present && entry->large.always1) {
827                    paging_x86_64_modify_flags_large(entry, flags);
828                } else if (((union x86_64_pdir_entry*)entry)->d.present) {
829                    paging_x86_64_pdir_modify_flags((union x86_64_pdir_entry*)entry, flags);
830                }
831            }
832            break;
833        case ObjType_VNode_x86_64_pdpt :
834            for (int i = 0; i < pages; i++) {
835                union x86_64_ptable_entry *entry =
836                    (union x86_64_ptable_entry *)base + i;
837                if (entry->large.present && entry->large.always1) {
838                    paging_x86_64_modify_flags_huge(entry, flags);
839                } else if (((union x86_64_pdir_entry*)entry)->d.present) {
840                    paging_x86_64_pdir_modify_flags((union x86_64_pdir_entry*)entry, flags);
841                }
842            }
843            break;
844        case ObjType_VNode_x86_64_pml4 :
845            for (int i = 0; i < pages; i++) {
846                union x86_64_pdir_entry *entry =
847                    (union x86_64_pdir_entry *)base + i;
848                if (entry->d.present) {
849                    paging_x86_64_pdir_modify_flags(entry, flags);
850                }
851            }
852            break;
853        default:
854            return SYS_ERR_VNODE_TYPE;
855    }
856
857    return SYS_ERR_OK;
858}
859
860/**
861 * \brief modify flags of mapping `mapping`.
862 *
863 * \arg mapping the mapping to modify
864 * \arg offset the offset from the first page table entry in entries
865 * \arg pages the number of pages to modify
866 * \arg flags the new flags
867 * \arg va_hint a user-supplied virtual address for hinting selective TLB
868 *              flushing
869 */
870errval_t page_mappings_modify_flags(struct capability *mapping, size_t offset,
871                                    size_t pages, size_t mflags, genvaddr_t va_hint)
872{
873    assert(type_is_mapping(mapping->type));
874    struct Frame_Mapping *info = &mapping->u.frame_mapping;
875
876    /* Calculate page access protection flags */
877    // Get frame cap rights
878    paging_x86_64_flags_t flags =
879        paging_x86_64_cap_to_page_flags(info->cap->rights);
880    // Mask with provided access rights mask
881    flags = paging_x86_64_mask_attrs(flags, X86_64_PTABLE_ACCESS(mflags));
882    // Add additional arch-specific flags
883    flags |= X86_64_PTABLE_FLAGS(mflags);
884    // Unconditionally mark the page present
885    flags |= X86_64_PTABLE_PRESENT;
886
887    // check arguments
    if (offset >= X86_64_PTABLE_SIZE) { // offset must be within the page table
889        return SYS_ERR_VNODE_SLOT_INVALID;
890    }
    if (offset + pages > X86_64_PTABLE_SIZE) { // range must not overrun the page table
892        return SYS_ERR_VM_MAP_SIZE;
893    }
894
895    // get pt cap to figure out page size
896    struct cte *leaf_pt = info->ptable;
897    if (!type_is_vnode(leaf_pt->cap.type)) {
898        return SYS_ERR_VNODE_TYPE;
899    }
901    // add first pte location from mapping cap to user supplied offset
902    offset += info->entry;
903
904    errval_t err;
905    err = generic_modify_flags(leaf_pt, offset, pages, flags);
906    if (err_is_fail(err)) {
907        return err;
908    }
909
910    size_t pagesize = ptable_type_get_page_size(leaf_pt->cap.type);
    if (va_hint > BASE_PAGE_SIZE) {
912        debug(SUBSYS_PAGING,
913                "selective flush: 0x%"PRIxGENVADDR"--0x%"PRIxGENVADDR"\n",
914                va_hint, va_hint + pages * pagesize);
915        // use as direct hint
916        // invlpg should work for large/huge pages
917        for (int i = 0; i < pages; i++) {
918            do_one_tlb_flush(va_hint + i * pagesize);
919        }
920    } else if (va_hint == 1) {
921        // XXX: remove this or cleanup interface, -SG, 2015-03-11
922        // do computed selective flush
923        debug(SUBSYS_PAGING, "computed selective flush\n");
924        return paging_tlb_flush_range(cte_for_cap(mapping), offset, pages);
925    } else {
926        debug(SUBSYS_PAGING, "full flush\n");
927        /* do full TLB flush */
928        do_full_tlb_flush();
929    }
930
931    return SYS_ERR_OK;
932}
933
934errval_t ptable_modify_flags(struct capability *leaf_pt, size_t offset,
                             size_t pages, size_t mflags)
936{
937    /* Calculate page access protection flags */
938    // Mask with provided access rights mask
939    paging_x86_64_flags_t flags = X86_64_PTABLE_USER_SUPERVISOR;
940    flags = paging_x86_64_mask_attrs(flags, X86_64_PTABLE_ACCESS(mflags));
941    // Add additional arch-specific flags
942    flags |= X86_64_PTABLE_FLAGS(mflags);
943    // Unconditionally mark the page present
944    flags |= X86_64_PTABLE_PRESENT;
945
946    // check arguments
    if (offset >= X86_64_PTABLE_SIZE) { // offset must be within the page table
948        return SYS_ERR_VNODE_SLOT_INVALID;
949    }
    if (offset + pages > X86_64_PTABLE_SIZE) { // range must not overrun the page table
951        return SYS_ERR_VM_MAP_SIZE;
952    }
953
954    errval_t err = generic_modify_flags(cte_for_cap(leaf_pt), offset, pages, flags);
955
956    do_full_tlb_flush();
957
958    return err;
959}
960
961bool paging_is_region_valid(lvaddr_t buffer, size_t size, uint8_t type)
962{
963    lvaddr_t root_pt = local_phys_to_mem(dcb_current->vspace);
964
    if (size == 0) { return true; }
    // address of the last byte of the region, so all bounds are inclusive
    lvaddr_t end = buffer + size - 1;
966
967    uint16_t first_pml4e = X86_64_PML4_BASE(buffer);
968    uint16_t last_pml4e = X86_64_PML4_BASE(end);
969
    uint16_t first_pdpte, first_pdire, first_pte;
    uint16_t last_pdpte, last_pdire, last_pte;
972
973
974    union x86_64_ptable_entry *pte;
975    union x86_64_pdir_entry *pde;
976
977    for (uint16_t pml4e = first_pml4e; pml4e <= last_pml4e; pml4e++) {
978        pde = (union x86_64_pdir_entry *)root_pt + pml4e;
979        if (!pde->d.present) { return false; }
980        if (type == ACCESS_WRITE && !pde->d.read_write) { return false; }
981        // calculate which part of pdpt to check
982        first_pdpte = pml4e == first_pml4e ? X86_64_PDPT_BASE(buffer) : 0;
        last_pdpte  = pml4e == last_pml4e  ? X86_64_PDPT_BASE(end)  : PTABLE_ENTRIES - 1;
984        // read pdpt base
985        lvaddr_t pdpt = local_phys_to_mem((genpaddr_t)pde->d.base_addr << BASE_PAGE_BITS);
986        for (uint16_t pdptidx = first_pdpte; pdptidx <= last_pdpte; pdptidx++) {
987            pde = (union x86_64_pdir_entry *)pdpt + pdptidx;
988            if (!pde->d.present) { return false; }
989            if (type == ACCESS_WRITE && !pde->d.read_write) { return false; }
990            pte = (union x86_64_ptable_entry *)pde;
991            if (!pte->huge.always1) {
                // calculate which part of the pdir to check
                first_pdire = pdptidx == first_pdpte ? X86_64_PDIR_BASE(buffer) : 0;
                last_pdire  = pdptidx == last_pdpte  ? X86_64_PDIR_BASE(end)  : PTABLE_ENTRIES - 1;
                // read pdir base
                lvaddr_t pdir = local_phys_to_mem((genpaddr_t)pde->d.base_addr << BASE_PAGE_BITS);
997                for (uint16_t pdiridx = first_pdire; pdiridx <= last_pdire; pdiridx++) {
998                    pde = (union x86_64_pdir_entry *)pdir + pdiridx;
999                    if (!pde->d.present) { return false; }
1000                    if (type == ACCESS_WRITE && !pde->d.read_write) { return false; }
1001                    pte = (union x86_64_ptable_entry *)pde;
1002                    if (!pte->large.always1) {
                        // calculate which part of the ptable to check
                        first_pte = pdiridx == first_pdire ? X86_64_PTABLE_BASE(buffer) : 0;
                        last_pte  = pdiridx == last_pdire  ? X86_64_PTABLE_BASE(end)  : PTABLE_ENTRIES - 1;
                        // read ptable base
                        lvaddr_t pt = local_phys_to_mem((genpaddr_t)pde->d.base_addr << BASE_PAGE_BITS);
                        for (uint16_t ptidx = first_pte; ptidx <= last_pte; ptidx++) {
1009                            pte = (union x86_64_ptable_entry *)pt + ptidx;
1010                            if (!pte->base.present) { return false; }
1011                            if (type == ACCESS_WRITE && !pte->base.read_write) { return false; }
1012                        }
1013                    }
1014                }
1015            }
1016        }
1017    }
1018    // if we never bailed early, the access is fine.
1019    return true;
1020}
1021
1022void paging_dump_tables_around(struct dcb *dispatcher, lvaddr_t vaddr)
1023{
1024    if (!local_phys_is_valid(dispatcher->vspace)) {
1025        printk(LOG_ERR, "dispatcher->vspace = 0x%"PRIxLPADDR": too high!\n" ,
1026               dispatcher->vspace);
1027        return;
1028    }
1029    lvaddr_t root_pt = local_phys_to_mem(dispatcher->vspace);
1030
    uint16_t first_pml4e = 0, last_pml4e = X86_64_PML4_BASE(MEMORY_OFFSET);
1032
1033    if (vaddr) {
1034        first_pml4e = X86_64_PML4_BASE(vaddr);
1035        last_pml4e = first_pml4e + 1;
1036        printk(LOG_NOTE, "printing page tables for PML4e %hu\n", first_pml4e);
1037    }
1038
1039    // loop over pdpts
1040    union x86_64_ptable_entry *pt;
1041    for (int pdpt_index = first_pml4e; pdpt_index < last_pml4e; pdpt_index++) {
1042        union x86_64_pdir_entry *pdpt = (union x86_64_pdir_entry *)root_pt + pdpt_index;
1043        if (!pdpt->d.present) { continue; }
1044        else {
1045            genpaddr_t paddr = (genpaddr_t)pdpt->d.base_addr << BASE_PAGE_BITS;
1046            printf("%d: 0x%"PRIxGENPADDR" (%d %d), raw=0x%"PRIx64"\n",
1047                    pdpt_index, paddr,
1048                    pdpt->d.read_write, pdpt->d.user_supervisor,
1049                    pdpt->raw);
1050        }
1051        genpaddr_t pdpt_gp = (genpaddr_t)pdpt->d.base_addr << BASE_PAGE_BITS;
1052        lvaddr_t pdpt_lv = local_phys_to_mem(gen_phys_to_local_phys(pdpt_gp));
1053
1054        for (int pdir_index = 0; pdir_index < X86_64_PTABLE_SIZE; pdir_index++) {
1055            // get pdir
1056            union x86_64_pdir_entry *pdir = (union x86_64_pdir_entry *)pdpt_lv + pdir_index;
1057            pt = (union x86_64_ptable_entry*)pdir;
1058            if (!pdir->d.present) { continue; }
1059            // check if pdir or huge page
1060            if (pt->huge.always1) {
1061                // is huge page mapping
1062                genpaddr_t paddr = (genpaddr_t)pt->huge.base_addr << HUGE_PAGE_BITS;
1063                printf("%d.%d: 0x%"PRIxGENPADDR" (%d %d %d), raw=0x%"PRIx64"\n", pdpt_index,
1064                        pdir_index, paddr, pt->huge.read_write,
1065                        pt->huge.dirty, pt->huge.accessed, pt->raw);
1066                // goto next pdpt entry
1067                continue;
1068            } else {
1069                genpaddr_t paddr = (genpaddr_t)pdir->d.base_addr << BASE_PAGE_BITS;
1070                printf("%d.%d: 0x%"PRIxGENPADDR" (%d %d), raw=0x%"PRIx64"\n",
1071                        pdpt_index, pdir_index, paddr,
1072                        pdir->d.read_write, pdir->d.user_supervisor,
1073                        pdir->raw);
1074            }
1075            genpaddr_t pdir_gp = (genpaddr_t)pdir->d.base_addr << BASE_PAGE_BITS;
1076            lvaddr_t pdir_lv = local_phys_to_mem(gen_phys_to_local_phys(pdir_gp));
1077
1078            for (int ptable_index = 0; ptable_index < X86_64_PTABLE_SIZE; ptable_index++) {
1079                // get ptable
1080                union x86_64_pdir_entry *ptable = (union x86_64_pdir_entry *)pdir_lv + ptable_index;
1081                pt = (union x86_64_ptable_entry *)ptable;
1082                if (!ptable->d.present) { continue; }
1083                // check if ptable or large page
1084                if (pt->large.always1) {
1085                    // is large page mapping
1086                    genpaddr_t paddr = (genpaddr_t)pt->large.base_addr << LARGE_PAGE_BITS;
1087                    printf("%d.%d.%d: 0x%"PRIxGENPADDR" (%d %d %d), raw=0x%"PRIx64"\n",
1088                            pdpt_index, pdir_index, ptable_index, paddr,
1089                            pt->large.read_write, pt->large.dirty,
1090                            pt->large.accessed, pt->raw);
1091                    // goto next pdir entry
1092                    continue;
1093                } else {
1094                    genpaddr_t paddr = (genpaddr_t)ptable->d.base_addr << BASE_PAGE_BITS;
1095                    printf("%d.%d.%d: 0x%"PRIxGENPADDR" (%d %d), raw=0x%"PRIx64"\n",
1096                            pdpt_index, pdir_index, ptable_index, paddr,
1097                            ptable->d.read_write, ptable->d.user_supervisor,
1098                            ptable->raw);
1099                }
1100                genpaddr_t ptable_gp = (genpaddr_t)ptable->d.base_addr << BASE_PAGE_BITS;
1101                lvaddr_t ptable_lv = local_phys_to_mem(gen_phys_to_local_phys(ptable_gp));
1102
1103                for (int entry = 0; entry < X86_64_PTABLE_SIZE; entry++) {
1104                    union x86_64_ptable_entry *e =
1105                        (union x86_64_ptable_entry *)ptable_lv + entry;
1106                    if (!e->base.present) { continue; }
1107                    genpaddr_t paddr = (genpaddr_t)e->base.base_addr << BASE_PAGE_BITS;
1108                    printf("%d.%d.%d.%d: 0x%"PRIxGENPADDR" (%d %d %d), raw=0x%"PRIx64"\n",
1109                            pdpt_index, pdir_index, ptable_index, entry,
1110                            paddr, e->base.read_write, e->base.dirty, e->base.accessed,
1111                            e->raw);
1112                }
1113            }
1114        }
1115    }
1116}
1117
1118void paging_dump_tables(struct dcb *dispatcher)
1119{
    paging_dump_tables_around(dispatcher, 0);
1121}
1122