/*
 * Copyright 2017, Data61
 * Commonwealth Scientific and Industrial Research Organisation (CSIRO)
 * ABN 41 687 119 230.
 *
 * This software may be distributed and modified according to the terms of
 * the BSD 2-Clause license. Note that NO WARRANTY is provided.
 * See "LICENSE_BSD2.txt" for details.
 *
 * @TAG(DATA61_BSD)
 */
#include <autoconf.h>
#include <sel4/sel4.h>

#ifdef CONFIG_ARCH_X86
#include <platsupport/arch/tsc.h>
#endif

#define N_ASID_POOLS ((int)BIT(seL4_NumASIDPoolsBits))
#define ASID_POOL_SIZE ((int)BIT(seL4_ASIDPoolIndexBits))

#include "../helpers.h"

static int remote_function(void)
{
    return 42;
}

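/* VSPACE0000: start a helper in its own cspace and vspace and check that its
 * return value makes it back across the address-space boundary. */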
static int test_interas_diffcspace(env_t env)
{
    helper_thread_t t;

    create_helper_process(env, &t);

    start_helper(env, &t, (helper_fn_t) remote_function, 0, 0, 0, 0);
    seL4_Word ret = wait_for_helper(&t);
    test_assert(ret == 42);
    cleanup_helper(env, &t);

    return sel4test_get_result();
}
DEFINE_TEST(VSPACE0000, "Test threads in different cspace/vspace", test_interas_diffcspace, true)

#if defined(CONFIG_ARCH_AARCH32)
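/* VSPACE0001 (AArch32): map a small page through a fresh page directory and
 * page table, delete the page directory cap, then check that unmapping the
 * frame afterwards is still safe. */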
static int
test_unmap_after_delete(env_t env)
{
    seL4_Word map_addr = 0x10000000;
    cspacepath_t path;
    int error;

    seL4_CPtr pd = vka_alloc_object_leaky(&env->vka, seL4_ARM_PageDirectoryObject, 0);
    seL4_CPtr pt = vka_alloc_object_leaky(&env->vka, seL4_ARM_PageTableObject, 0);
    seL4_CPtr frame = vka_alloc_object_leaky(&env->vka, seL4_ARM_SmallPageObject, 0);
    test_assert(pd != 0);
    test_assert(pt != 0);
    test_assert(frame != 0);

    /* assign the page directory to our ASID pool so it can be used as a vspace root */
    error = seL4_ARM_ASIDPool_Assign(env->asid_pool, pd);
    test_error_eq(error, seL4_NoError);

    /* map page table into page directory */
    error = seL4_ARM_PageTable_Map(pt, pd, map_addr, seL4_ARM_Default_VMAttributes);
    test_error_eq(error, seL4_NoError);

    /* map frame into the page table */
    error = seL4_ARM_Page_Map(frame, pd, map_addr, seL4_AllRights, seL4_ARM_Default_VMAttributes);
    test_error_eq(error, seL4_NoError);

    /* delete the page directory */
    vka_cspace_make_path(&env->vka, pd, &path);
    seL4_CNode_Delete(path.root, path.capPtr, path.capDepth);

    /* unmap the frame */
    seL4_ARM_Page_Unmap(frame);

    return sel4test_get_result();
}
DEFINE_TEST(VSPACE0001, "Test unmapping a page after deleting the PD", test_unmap_after_delete, true)

#elif defined(CONFIG_ARCH_AARCH64)
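/* VSPACE0001 (AArch64): the same test, but walking the deeper AArch64 table
 * hierarchy (PGD/PUD/PD/PT) and deleting the vspace root before the unmap. */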
static int
test_unmap_after_delete(env_t env)
{
    seL4_Word map_addr = 0x10000000;
    cspacepath_t path;
    int error;

    seL4_CPtr pgd = vka_alloc_object_leaky(&env->vka, seL4_ARM_PageGlobalDirectoryObject, 0);
    seL4_CPtr pud = vka_alloc_object_leaky(&env->vka, seL4_ARM_PageUpperDirectoryObject, 0);
    seL4_CPtr pd = vka_alloc_object_leaky(&env->vka, seL4_ARM_PageDirectoryObject, 0);
    seL4_CPtr pt = vka_alloc_object_leaky(&env->vka, seL4_ARM_PageTableObject, 0);
    seL4_CPtr frame = vka_alloc_object_leaky(&env->vka, seL4_ARM_SmallPageObject, 0);
    /* Under an Arm Hyp configuration where the CPU only supports 40-bit physical addressing,
     * we only have 3-level page tables and no PGD.
     */
    test_assert((seL4_PGDBits == 0) || pgd != 0);
    test_assert(pud != 0);
    test_assert(pd != 0);
    test_assert(pt != 0);
    test_assert(frame != 0);

    /* the vspace root is the PUD when there is no PGD level, otherwise the PGD */
    seL4_CPtr vspace = (seL4_PGDBits == 0) ? pud : pgd;
    error = seL4_ARM_ASIDPool_Assign(env->asid_pool, vspace);
    test_error_eq(error, seL4_NoError);
#if seL4_PGDBits > 0
    /* map pud into page global directory */
    error = seL4_ARM_PageUpperDirectory_Map(pud, vspace, map_addr, seL4_ARM_Default_VMAttributes);
    test_error_eq(error, seL4_NoError);
#endif

    /* map pd into page upper directory */
    error = seL4_ARM_PageDirectory_Map(pd, vspace, map_addr, seL4_ARM_Default_VMAttributes);
    test_error_eq(error, seL4_NoError);

    /* map page table into page directory */
    error = seL4_ARM_PageTable_Map(pt, vspace, map_addr, seL4_ARM_Default_VMAttributes);
    test_error_eq(error, seL4_NoError);

    /* map frame into the page table */
    error = seL4_ARM_Page_Map(frame, vspace, map_addr, seL4_AllRights, seL4_ARM_Default_VMAttributes);
    test_error_eq(error, seL4_NoError);

    /* delete the vspace root */
    vka_cspace_make_path(&env->vka, vspace, &path);
    seL4_CNode_Delete(path.root, path.capPtr, path.capDepth);

    /* unmap the frame */
    seL4_ARM_Page_Unmap(frame);

    return sel4test_get_result();
}
DEFINE_TEST(VSPACE0001, "Test unmapping a page after deleting the PD", test_unmap_after_delete, true)
#endif /* CONFIG_ARCH_AARCHxx */

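/* VSPACE0002: construct a fresh ASID pool from an untyped via ASIDControl_MakePool,
 * then check that an already-assigned vspace root is rejected while a fresh one
 * is accepted. */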
static int
test_asid_pool_make(env_t env)
{
    vka_t *vka = &env->vka;
    cspacepath_t path;
    seL4_CPtr pool = vka_alloc_untyped_leaky(vka, seL4_PageBits);
    test_assert(pool);
    int ret = vka_cspace_alloc_path(vka, &path);
    ret = seL4_ARCH_ASIDControl_MakePool(env->asid_ctrl, pool, env->cspace_root, path.capPtr, path.capDepth);
    test_eq(ret, seL4_NoError);
    /* env->page_directory already has an ASID assigned, so assigning it to the new pool must fail */
    ret = seL4_ARCH_ASIDPool_Assign(path.capPtr, env->page_directory);
    test_eq(ret, seL4_InvalidCapability);
    /* a freshly created vspace root can be assigned to the new pool */
    vka_object_t vspaceroot;
    ret = vka_alloc_vspace_root(vka, &vspaceroot);
    test_assert(!ret);
    ret = seL4_ARCH_ASIDPool_Assign(path.capPtr, vspaceroot.cptr);
    test_eq(ret, seL4_NoError);
    return sel4test_get_result();
}
DEFINE_TEST(VSPACE0002, "Test create ASID pool", test_asid_pool_make, true)

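/* VSPACE0003: create the remaining N_ASID_POOLS - 1 ASID pools and expect every
 * MakePool invocation to succeed. */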
static int
test_alloc_multi_asid_pools(env_t env)
{
    vka_t *vka = &env->vka;
    seL4_CPtr pool;
    cspacepath_t path;
    int i, ret;

    /* the initial ASID pool already exists, so only N_ASID_POOLS - 1 more can be created */
    for (i = 0; i < N_ASID_POOLS - 1; i++) {
        pool = vka_alloc_untyped_leaky(vka, seL4_PageBits);
        test_assert(pool);
        ret = vka_cspace_alloc_path(vka, &path);
        ret = seL4_ARCH_ASIDControl_MakePool(env->asid_ctrl, pool, env->cspace_root, path.capPtr, path.capDepth);
        test_eq(ret, seL4_NoError);
    }
    return sel4test_get_result();
}
DEFINE_TEST(VSPACE0003, "Test create multiple ASID pools", test_alloc_multi_asid_pools, true)

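/* VSPACE0004: exhaust the ASID pool slots, then check that one further
 * MakePool invocation fails with seL4_DeleteFirst. */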
static int
test_run_out_asid_pools(env_t env)
{
    vka_t *vka = &env->vka;
    seL4_CPtr pool;
    cspacepath_t path;
    int i, ret;

    for (i = 0; i < N_ASID_POOLS - 1; i++) {
        pool = vka_alloc_untyped_leaky(vka, seL4_PageBits);
        test_assert(pool);
        ret = vka_cspace_alloc_path(vka, &path);
        ret = seL4_ARCH_ASIDControl_MakePool(env->asid_ctrl, pool, env->cspace_root, path.capPtr, path.capDepth);
        test_eq(ret, seL4_NoError);
    }
    /* one more MakePool must fail: all of the ASID pool slots are now in use */
    ret = seL4_ARCH_ASIDControl_MakePool(env->asid_ctrl, pool, env->cspace_root, path.capPtr, path.capDepth);
    test_eq(ret, seL4_DeleteFirst);
    return sel4test_get_result();
}
DEFINE_TEST(VSPACE0004, "Test running out of ASID pools", test_run_out_asid_pools, true)

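/* VSPACE0005: fill a single ASID pool with ASID_POOL_SIZE vspace roots, then
 * check that assigning one more fails with seL4_DeleteFirst. */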
static int
test_overassign_asid_pool(env_t env)
{
    vka_t *vka = &env->vka;
    seL4_CPtr pool;
    cspacepath_t path;
    vka_object_t vspaceroot;
    int i, ret;

    pool = vka_alloc_untyped_leaky(vka, seL4_PageBits);
    test_assert(pool);
    ret = vka_cspace_alloc_path(vka, &path);
    ret = seL4_ARCH_ASIDControl_MakePool(env->asid_ctrl, pool, env->cspace_root, path.capPtr, path.capDepth);
    test_eq(ret, seL4_NoError);
    for (i = 0; i < ASID_POOL_SIZE; i++) {
        ret = vka_alloc_vspace_root(vka, &vspaceroot);
        test_assert(!ret);
        ret = seL4_ARCH_ASIDPool_Assign(path.capPtr, vspaceroot.cptr);
        test_eq(ret, seL4_NoError);
        if (ret != seL4_NoError) {
            break;
        }
    }
    test_eq(i, ASID_POOL_SIZE);
    ret = vka_alloc_vspace_root(vka, &vspaceroot);
    test_assert(!ret);
    ret = seL4_ARCH_ASIDPool_Assign(path.capPtr, vspaceroot.cptr);
    test_eq(ret, seL4_DeleteFirst);
    return sel4test_get_result();
}
DEFINE_TEST(VSPACE0005, "Test overassigning ASID pool", test_overassign_asid_pool, true)

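/* Helper run in each new address space: write the tag to a fixed virtual
 * address and hand it back so the caller can verify the write landed. */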
static char
incr_mem(seL4_Word tag)
{
    unsigned int *test = (void *)0x10000000;

    *test = tag;
    return *test;
}

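/* VSPACE0006: for each ASID pool that can still be created, start a helper
 * process whose vspace is assigned from that pool and check that it can touch
 * its own memory. */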
static int test_create_asid_pools_and_touch(env_t env)
{
    vka_t *vka = &env->vka;
    seL4_CPtr pool;
    cspacepath_t poolCap;
    helper_thread_t t;
    int i, ret;

    for (i = 0; i < N_ASID_POOLS - 1; i++) {
        pool = vka_alloc_untyped_leaky(vka, seL4_PageBits);
        test_assert(pool);
        ret = vka_cspace_alloc_path(vka, &poolCap);
        ret = seL4_ARCH_ASIDControl_MakePool(env->asid_ctrl, pool, env->cspace_root, poolCap.capPtr, poolCap.capDepth);
        test_eq(ret, seL4_NoError);

        create_helper_process_custom_asid(env, &t, poolCap.capPtr);
        start_helper(env, &t, (helper_fn_t) incr_mem, i, 0, 0, 0);
        ret = wait_for_helper(&t);
        test_eq(ret, i);
        cleanup_helper(env, &t);
    }
    return sel4test_get_result();
}
DEFINE_TEST(VSPACE0006, "Test touching all available ASID pools", test_create_asid_pools_and_touch, true)

#ifdef CONFIG_ARCH_IA32
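/* VSPACE0010 (IA-32): map a fresh frame and check that the accessed and dirty
 * bits reported by seL4_X86_PageDirectory_GetStatusBits transition from clear,
 * to accessed after a read, to accessed and dirty after a write. */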
static int
test_dirty_accessed_bits(env_t env)
{
    seL4_CPtr frame;
    int err;
    seL4_X86_PageDirectory_GetStatusBits_t status;

    void *vaddr;
    reservation_t reservation;

    reservation = vspace_reserve_range(&env->vspace,
                                       PAGE_SIZE_4K, seL4_AllRights, 1, &vaddr);
    test_assert(reservation.res);

    /* Create a frame */
    frame = vka_alloc_frame_leaky(&env->vka, PAGE_BITS_4K);
    test_assert(frame != seL4_CapNull);

    /* Map it in */
    err = vspace_map_pages_at_vaddr(&env->vspace, &frame, NULL, vaddr, 1, seL4_PageBits, reservation);
    test_assert(!err);

    /* Check the status flags: a freshly mapped page is neither accessed nor dirty */
    status = seL4_X86_PageDirectory_GetStatusBits(vspace_get_root(&env->vspace), (seL4_Word)vaddr);
    test_assert(!status.error);
    test_assert(!status.accessed);
    test_assert(!status.dirty);
    /* try to stop the prefetcher touching the page before we do */
    rdtsc_cpuid();

    /* perform a read and check status flags */
    asm volatile("" :: "r"(*(uint32_t *)vaddr) : "memory");
    status = seL4_X86_PageDirectory_GetStatusBits(vspace_get_root(&env->vspace), (seL4_Word)vaddr);
    test_assert(!status.error);
    test_assert(status.accessed);
    test_assert(!status.dirty);
    /* try to stop the prefetcher touching the page before we do */
    rdtsc_cpuid();

    /* perform a write and check status flags */
    *(uint32_t *)vaddr = 42;
    asm volatile("" ::: "memory");
    status = seL4_X86_PageDirectory_GetStatusBits(vspace_get_root(&env->vspace), (seL4_Word)vaddr);
    test_assert(!status.error);
    test_assert(status.accessed);
    test_assert(status.dirty);

    return sel4test_get_result();
}
DEFINE_TEST(VSPACE0010, "Test dirty and accessed bits on mappings", test_dirty_accessed_bits, true)
#endif