1/*
2 * Copyright 2017, Data61
3 * Commonwealth Scientific and Industrial Research Organisation (CSIRO)
4 * ABN 41 687 119 230.
5 *
6 * This software may be distributed and modified according to the terms of
7 * the BSD 2-Clause license. Note that NO WARRANTY is provided.
8 * See "LICENSE_BSD2.txt" for details.
9 *
10 * @TAG(DATA61_BSD)
11 */
12
13#include <autoconf.h>
14#include <sel4test-driver/gen_config.h>
15#include <assert.h>
16#include <stddef.h>
17#include <stdio.h>
18#include <sel4/sel4.h>
19#include <vka/object.h>
20#include <sel4utils/util.h>
21#include <sel4utils/arch/cache.h>
22
23#include "../helpers.h"
24#include "frame_type.h"
25
26#if defined(CONFIG_ARCH_ARM)
27static int test_page_flush(env_t env)
28{
29    seL4_CPtr frame, framec;
30    uintptr_t vstart, vstartc;
31    volatile uint32_t *ptr, *ptrc;
32    vka_t *vka;
33    int error;
34
35    vka = &env->vka;
36
37    void *vaddr;
38    void *vaddrc;
39    reservation_t reservation, reservationc;
40
41    reservation = vspace_reserve_range(&env->vspace,
42                                       PAGE_SIZE_4K, seL4_AllRights, 0, &vaddr);
43    assert(reservation.res);
44    reservationc = vspace_reserve_range(&env->vspace,
45                                        PAGE_SIZE_4K, seL4_AllRights, 1, &vaddrc);
46    assert(reservationc.res);
47
48    vstart = (uintptr_t)vaddr;
49    assert(IS_ALIGNED(vstart, seL4_PageBits));
50    vstartc = (uintptr_t)vaddrc;
51    assert(IS_ALIGNED(vstartc, seL4_PageBits));
52
53    ptr = (volatile uint32_t *)vstart;
54    ptrc = (volatile uint32_t *)vstartc;
55
56    /* Create a frame */
57    frame = vka_alloc_frame_leaky(vka, PAGE_BITS_4K);
58    test_assert(frame != seL4_CapNull);
59
60    /* Duplicate the cap */
61    framec = get_free_slot(env);
62    test_assert(framec != seL4_CapNull);
63    error = cnode_copy(env, frame, framec, seL4_AllRights);
64    test_error_eq(error, seL4_NoError);
65
66    /* map in a cap with cacheability */
67    error = vspace_map_pages_at_vaddr(&env->vspace, &framec, NULL, vaddrc, 1, seL4_PageBits, reservationc);
68    test_error_eq(error, seL4_NoError);
69    /* map in a cap without cacheability */
70    error = vspace_map_pages_at_vaddr(&env->vspace, &frame, NULL, vaddr, 1, seL4_PageBits, reservation);
71    test_error_eq(error, seL4_NoError);
72
73    /* Clean makes data observable to non-cached page */
74    *ptr = 0xC0FFEE;
75    *ptrc = 0xDEADBEEF;
76    test_assert(*ptr == 0xC0FFEE);
77    test_assert(*ptrc == 0xDEADBEEF);
78    error = seL4_ARM_Page_Clean_Data(framec, 0, PAGE_SIZE_4K);
79    assert(!error);
80    test_assert(*ptr == 0xDEADBEEF);
81    test_assert(*ptrc == 0xDEADBEEF);
82    /* Clean/Invalidate makes data observable to non-cached page */
83    *ptr = 0xC0FFEE;
84    *ptrc = 0xDEADBEEF;
85    test_assert(*ptr == 0xC0FFEE);
86    test_assert(*ptrc == 0xDEADBEEF);
87    error = seL4_ARM_Page_CleanInvalidate_Data(framec, 0, PAGE_SIZE_4K);
88    assert(!error);
89    test_assert(*ptr == 0xDEADBEEF);
90    test_assert(*ptrc == 0xDEADBEEF);
91    /* Invalidate makes RAM data observable to cached page */
92    *ptr = 0xC0FFEE;
93    *ptrc = 0xDEADBEEF;
94    test_assert(*ptr == 0xC0FFEE);
95    test_assert(*ptrc == 0xDEADBEEF);
96    error = seL4_ARM_Page_Invalidate_Data(framec, 0, PAGE_SIZE_4K);
97    assert(!error);
98
99    /* In case the invalidation performs an implicit clean, write a new
100       value to RAM and make sure the cached read retrieves it
101       Remember to drain any store buffer!
102    */
103    *ptr = 0xBEEFCAFE;
104#if defined(CONFIG_ARCH_AARCH32)
105#ifndef CONFIG_ARCH_ARM_V6
106    asm volatile("dmb" ::: "memory");
107#endif /* CONFIG_ARCH_ARM_V6 */
108#elif defined(CONFIG_ARCH_AARCH64)
109    asm volatile("dmb sy" ::: "memory");
110#endif /* CONFIG_ARCH_AARCHxx */
111    test_assert(*ptrc == 0xBEEFCAFE);
112    test_assert(*ptr == 0xBEEFCAFE);
113
114    return sel4test_get_result();
115}
116
117static int test_large_page_flush_operation(env_t env)
118{
119    int num_frame_types = ARRAY_SIZE(frame_types);
120    seL4_CPtr frames[num_frame_types];
121    int error;
122    vka_t *vka = &env->vka;
123
124    /* Grab some free vspace big enough to hold all the tests. */
125    seL4_Word vstart;
126    reservation_t reserve = vspace_reserve_range_aligned(&env->vspace, VSPACE_RV_SIZE, VSPACE_RV_ALIGN_BITS,
127                                                         seL4_AllRights, 1, (void **) &vstart);
128    test_assert(reserve.res != 0);
129
130    /* Create us some frames to play with. */
131    for (int i = 0; i < num_frame_types; i++) {
132        frames[i] = vka_alloc_frame_leaky(vka, frame_types[i].size_bits);
133        assert(frames[i]);
134    }
135
136    /* Map the pages in. */
137    for (int i = 0; i < num_frame_types; i++) {
138        uintptr_t cookie = 0;
139        error = vspace_map_pages_at_vaddr(&env->vspace, &frames[i], &cookie, (void *)(vstart + frame_types[i].vaddr_offset), 1,
140                                          frame_types[i].size_bits, reserve);
141        test_error_eq(error, seL4_NoError);
142    }
143
144    /* See if we can invoke page flush on each of them */
145    for (int i = 0; i < num_frame_types; i++) {
146        error = seL4_ARM_Page_Invalidate_Data(frames[i], 0, BIT(frame_types[i].size_bits));
147        test_error_eq(error, 0);
148        error = seL4_ARM_Page_Clean_Data(frames[i], 0, BIT(frame_types[i].size_bits));
149        test_error_eq(error, 0);
150        error = seL4_ARM_Page_CleanInvalidate_Data(frames[i], 0, BIT(frame_types[i].size_bits));
151        test_error_eq(error, 0);
152        error = seL4_ARM_Page_Unify_Instruction(frames[i], 0, BIT(frame_types[i].size_bits));
153        test_error_eq(error, 0);
154        error = seL4_ARCH_PageDirectory_Invalidate_Data(env->page_directory, vstart + frame_types[i].vaddr_offset,
155                                                        vstart + frame_types[i].vaddr_offset + BIT(frame_types[i].size_bits));
156        test_error_eq(error, 0);
157        error = seL4_ARCH_PageDirectory_Clean_Data(env->page_directory, vstart + frame_types[i].vaddr_offset,
158                                                   vstart + frame_types[i].vaddr_offset + BIT(frame_types[i].size_bits));
159        test_error_eq(error, 0);
160        error = seL4_ARCH_PageDirectory_CleanInvalidate_Data(env->page_directory, vstart + frame_types[i].vaddr_offset,
161                                                             vstart + frame_types[i].vaddr_offset + BIT(frame_types[i].size_bits));
162        test_error_eq(error, 0);
163        error = seL4_ARCH_PageDirectory_Unify_Instruction(env->page_directory, vstart + frame_types[i].vaddr_offset,
164                                                          vstart + frame_types[i].vaddr_offset + BIT(frame_types[i].size_bits));
165        test_error_eq(error, 0);
166    }
167
168    return sel4test_get_result();
169}
170
171static int test_page_directory_flush(env_t env)
172{
173    seL4_CPtr frame, framec;
174    uintptr_t vstart, vstartc;
175    volatile uint32_t *ptr, *ptrc;
176    vka_t *vka;
177    int err;
178
179    vka = &env->vka;
180
181    void *vaddr;
182    void *vaddrc;
183    reservation_t reservation, reservationc;
184
185    reservation = vspace_reserve_range(&env->vspace,
186                                       PAGE_SIZE_4K, seL4_AllRights, 0, &vaddr);
187    assert(reservation.res);
188    reservationc = vspace_reserve_range(&env->vspace,
189                                        PAGE_SIZE_4K, seL4_AllRights, 1, &vaddrc);
190    assert(reservationc.res);
191
192    vstart = (uintptr_t)vaddr;
193    assert(IS_ALIGNED(vstart, seL4_PageBits));
194    vstartc = (uintptr_t)vaddrc;
195    assert(IS_ALIGNED(vstartc, seL4_PageBits));
196
197    ptr = (volatile uint32_t *)vstart;
198    ptrc = (volatile uint32_t *)vstartc;
199
200    /* Create a frame */
201    frame = vka_alloc_frame_leaky(vka, PAGE_BITS_4K);
202    test_assert(frame != seL4_CapNull);
203
204    /* Duplicate the cap */
205    framec = get_free_slot(env);
206    test_assert(framec != seL4_CapNull);
207    err = cnode_copy(env, frame, framec, seL4_AllRights);
208    test_error_eq(err, seL4_NoError);
209
210    /* map in a cap with cacheability */
211    err = vspace_map_pages_at_vaddr(&env->vspace, &framec, NULL, vaddrc, 1, seL4_PageBits, reservationc);
212    test_error_eq(err, seL4_NoError);
213    /* map in a cap without cacheability */
214    err = vspace_map_pages_at_vaddr(&env->vspace, &frame, NULL, vaddr, 1, seL4_PageBits, reservation);
215    test_error_eq(err, seL4_NoError);
216
217    /* Clean makes data observable to non-cached page */
218    *ptr = 0xC0FFEE;
219    *ptrc = 0xDEADBEEF;
220    test_assert(*ptr == 0xC0FFEE);
221    test_assert(*ptrc == 0xDEADBEEF);
222    err = seL4_ARCH_PageDirectory_Clean_Data(env->page_directory, vstartc, vstartc + PAGE_SIZE_4K);
223    assert(!err);
224    test_assert(*ptr == 0xDEADBEEF);
225    test_assert(*ptrc == 0xDEADBEEF);
226    /* Clean/Invalidate makes data observable to non-cached page */
227    *ptr = 0xC0FFEE;
228    *ptrc = 0xDEADBEEF;
229    test_assert(*ptr == 0xC0FFEE);
230    test_assert(*ptrc == 0xDEADBEEF);
231    err = seL4_ARCH_PageDirectory_CleanInvalidate_Data(env->page_directory, vstartc, vstartc + PAGE_SIZE_4K);
232    assert(!err);
233    test_assert(*ptr == 0xDEADBEEF);
234    test_assert(*ptrc == 0xDEADBEEF);
235    /* Invalidate makes RAM data observable to cached page */
236    *ptr = 0xC0FFEE;
237    *ptrc = 0xDEADBEEF;
238    test_assert(*ptr == 0xC0FFEE);
239    test_assert(*ptrc == 0xDEADBEEF);
240    err = seL4_ARCH_PageDirectory_Invalidate_Data(env->page_directory, vstartc, vstartc + PAGE_SIZE_4K);
241    assert(!err);
242    /* In case the invalidation performs an implicit clean, write a new
243       value to RAM and make sure the cached read retrieves it.
244       Need to do an invalidate before retrieving though to guard
245       against speculative loads */
246    *ptr = 0xBEEFCAFE;
247    err = seL4_ARCH_PageDirectory_Invalidate_Data(env->page_directory, vstartc, vstartc + PAGE_SIZE_4K);
248    assert(!err);
249    test_assert(*ptrc == 0xBEEFCAFE);
250    test_assert(*ptr == 0xBEEFCAFE);
251
252    return sel4test_get_result();
253}
254
/* Register the tests with the sel4test framework.  All three are gated on
 * CONFIG_HAVE_CACHE, since the maintenance invocations are meaningless (and
 * unavailable) on cache-less configurations. */
DEFINE_TEST(CACHEFLUSH0001, "Test a cache maintenance on pages", test_page_flush, config_set(CONFIG_HAVE_CACHE))
DEFINE_TEST(CACHEFLUSH0002, "Test a cache maintenance on page directories", test_page_directory_flush,
            config_set(CONFIG_HAVE_CACHE))
DEFINE_TEST(CACHEFLUSH0003, "Test that cache maintenance can be done on large pages", test_large_page_flush_operation,
            config_set(CONFIG_HAVE_CACHE))
260
261#endif
262