1/*
2 * Copyright 2017, Data61
3 * Commonwealth Scientific and Industrial Research Organisation (CSIRO)
4 * ABN 41 687 119 230.
5 *
6 * This software may be distributed and modified according to the terms of
7 * the BSD 2-Clause license. Note that NO WARRANTY is provided.
8 * See "LICENSE_BSD2.txt" for details.
9 *
10 * @TAG(DATA61_BSD)
11 */
12
13/* Include Kconfig variables. */
14#include <autoconf.h>
15#include <sel4test-driver/gen_config.h>
16
17#include <stdio.h>
18
19#include "../helpers.h"
20
/* This file contains tests related to bugs that have previously occurred.
 * Tests should validate that the bug no longer exists.
 */
24
25/* Previously the layout of seL4_UserContext in libsel4 has been inconsistent
26 * with frameRegisters/gpRegisters in the kernel. This causes the syscalls
27 * seL4_TCB_ReadRegisters and seL4_TCB_WriteRegisters to function incorrectly.
28 * The following tests whether this issue has been re-introduced. For more
29 * information, see SELFOUR-113.
30 */
31
/* An endpoint for the helper thread and the main thread to synchronise on.
 * Each test assigns this from its helper's local endpoint before starting the
 * helper, so only one test may use it at a time. */
static seL4_CPtr shared_endpoint;
34
35/* This function provides a wrapper around seL4_Send to the parent thread. It
36 * can't be called directly from asm because seL4_Send typically gets inlined
37 * and its likely that no visible copy of this function exists to branch to.
38 */
39void reply_to_parent(seL4_Word result)
40__attribute__((noinline));
41void reply_to_parent(seL4_Word result)
42{
43    seL4_MessageInfo_t info = seL4_MessageInfo_new(result, 0, 0, 0);
44    seL4_Word badge = 0; /* ignored */
45    seL4_Word empty = 0; /* ignored */
46
47#if defined(CONFIG_ARCH_IA32)
48#if defined(CONFIG_KERNEL_MCS)
49    seL4_SendWithMRs(shared_endpoint, info, &empty);
50#else
51    seL4_SendWithMRs(shared_endpoint, info, &empty, &empty);
52#endif /* CONFIG_KERNEL_MCS */
53#else
54    seL4_SendWithMRs(shared_endpoint, info, &empty, &empty, &empty, &empty);
55#endif /* CONFIG_ARCH_IA32 */
56
57    /* Block to avoid returning and assume our parent will kill us. */
58    seL4_Wait(shared_endpoint, &badge);
59}
60
/* Declaration of the register-checking routine; its definition is written in
 * asm elsewhere. It verifies the registers it was started with and reports
 * the result back to the parent (via reply_to_parent). This function is
 * really static, but GCC doesn't like a static declaration when the
 * definition is in asm.
 */
void test_registers(void)
#if defined(CONFIG_ARCH_AARCH32)
/* Probably not necessary to mark this function naked as we define the
 * whole thing in asm anyway, but just in case GCC tries to do anything
 * sneaky.
 */
__attribute__((naked))
#endif
;
/* Regression test for SELFOUR-113: write a full seL4_UserContext into a
 * freshly created (stopped) helper thread, resume it, and have it verify in
 * asm (test_registers) that every register arrived intact. The helper sends
 * its verdict back on shared_endpoint, encoded in the message label.
 */
int test_write_registers(env_t env)
{
    helper_thread_t thread;
    seL4_UserContext context = { 0 };
    int result;
    seL4_MessageInfo_t info;
    seL4_Word badge = 0; /* ignored */

    /* Create a thread without starting it. Most of these arguments are
     * ignored.
     */
    create_helper_thread(env, &thread);
    shared_endpoint = thread.local_endpoint.cptr;

    /* The values below are arbitrary distinct markers; they must match
     * exactly what the asm routine test_registers checks on each
     * architecture.
     */
#if defined(CONFIG_ARCH_AARCH32)
    context.pc = (seL4_Word)&test_registers;
    context.sp = 13;
    context.r0 = 15;
    context.r1 = 1;
    context.r2 = 2;
    context.r3 = 3;
    context.r4 = 4;
    context.r5 = 5;
    context.r6 = 6;
    context.r7 = 7;
    context.r8 = 8;
    context.r9 = 9;
    context.r10 = 10;
    context.r11 = 11;
    context.r12 = 12;
    /* R13 == SP */
    context.r14 = 14; /* LR */
    /* R15 == PC */
#elif defined(CONFIG_ARCH_AARCH64)
    context.pc = (seL4_Word)&test_registers;
    context.sp = 1;
    context.x0 = 2;
    context.x1 = 3;
    context.x2 = 4;
    context.x3 = 5;
    context.x4 = 6;
    context.x5 = 7;
    context.x6 = 8;
    context.x7 = 9;
    context.x8 = 10;
    context.x9 = 11;
    context.x10 = 12;
    context.x11 = 13;
    context.x12 = 14;
    context.x13 = 15;
    context.x14 = 16;
    context.x15 = 17;
    context.x16 = 18;
    context.x17 = 19;
    context.x18 = 20;
    context.x19 = 21;
    context.x20 = 22;
    context.x21 = 23;
    context.x22 = 24;
    context.x23 = 25;
    context.x24 = 26;
    context.x25 = 27;
    context.x26 = 28;
    context.x27 = 29;
    context.x28 = 30;
    context.x29 = 31;
    context.x30 = 32;
#elif defined(CONFIG_ARCH_X86_64)
    context.rip = (seL4_Word)&test_registers;
    context.rsp = 0x00000004UL;
    context.rax = 0x0000000aUL;
    context.rbx = 0x0000000bUL;
    context.rcx = 0x0000000cUL;
    context.rdx = 0x0000000dUL;
    context.rsi = 0x00000005UL;
    context.rdi = 0x00000002UL;
    context.rbp = 0x00000003UL;
    context.rflags = 0x00000001UL;
    context.r8 = 0x00000088UL;
    context.r9 = 0x00000099UL;
    context.r10 = 0x00000010UL;
    context.r11 = 0x00000011UL;
    context.r12 = 0x00000012UL;
    context.r13 = 0x00000013UL;
    context.r14 = 0x00000014UL;
    context.r15 = 0x00000015UL;
#elif defined(CONFIG_ARCH_X86)
    context.eip = (seL4_Word)&test_registers;
    context.esp = 0x00000004;
    context.eax = 0x0000000a;
    context.ebx = 0x0000000b;
    context.ecx = 0x0000000c;
    context.edx = 0x0000000d;
    context.esi = 0x00000005;
    context.edi = 0x00000002;
    context.ebp = 0x00000003;
    context.eflags = 0x00000001; /* Set the CF bit */
#elif defined(CONFIG_ARCH_RISCV)
    context.pc = (seL4_Word)&test_registers;
    context.ra = 1;
    context.sp = 2;
    /* skip gp and tp, they are 'unallocatable' */
    context.t0 = 4;
    context.t1 = 5;
    context.t2 = 6;
    context.s0 = 7;
    context.s1 = 8;
    /* skip a0, we use it to load the immediate values to and compare the rest */
    context.a1 = 10;
    context.a2 = 11;
    context.a3 = 12;
    context.a4 = 13;
    context.a5 = 14;
    context.a6 = 15;

    /* This is an ABI requirement */
    extern char __global_pointer$[];
    context.gp = (seL4_Word) __global_pointer$;
#else
#error "Unsupported architecture"
#endif

    /* Install the context and resume the thread in one call (resume_target
     * is true). */
    result = seL4_TCB_WriteRegisters(get_helper_tcb(&thread), true, 0 /* ignored */,
                                     sizeof(seL4_UserContext) / sizeof(seL4_Word), &context);

    if (!result) {
        /* If we've successfully started the thread, block until it's checked
         * its registers.
         */
        info = api_recv(shared_endpoint, &badge, get_helper_reply(&thread));
    }
    cleanup_helper(env, &thread);

    test_assert(result == 0);

    /* The helper encodes its verdict in the message label: 0 == all registers
     * matched. (info is only read after result == 0 above, so it has been
     * initialised by the receive.) */
    result = seL4_MessageInfo_get_label(info);
    test_assert(result == 0);

    return sel4test_get_result();
}
DEFINE_TEST(REGRESSIONS0001, "Ensure WriteRegisters functions correctly", test_write_registers, true)
215
216#if defined(CONFIG_ARCH_ARM)
217#if defined(CONFIG_ARCH_AARCH32)
/* Performs an ldrex and strex sequence with a context switch in between. See
 * the comment in the function following for an explanation of purpose.
 *
 * Returns SUCCESS when the strex fails (exclusive monitor was cleared by the
 * context switch), FAILURE when it unexpectedly succeeds.
 */
static int do_ldrex(void)
{
    seL4_Word dummy1, dummy2, result;

    /* We don't really care where we are loading from here. This is just to set
     * the exclusive access tag.
     */
    asm volatile("ldrex %[rt], [%[rn]]"
                 : [rt]"=&r"(dummy1)
                 : [rn]"r"(&dummy2));

    /* Force a context switch to our parent. */
    seL4_Signal(shared_endpoint);

    /* Again, we don't care where we are storing to. This is to see whether the
     * exclusive access tag is still set.
     */
    asm volatile("strex %[rd], %[rt], [%[rn]]"
                 : [rd]"=&r"(result)
                 : [rt]"r"(dummy2), [rn]"r"(&dummy1));

    /* The strex should have failed (and returned 1) because the context switch
     * should have cleared the exclusive access tag.
     */
    return result == 0 ? FAILURE : SUCCESS;
}
247#elif defined(CONFIG_ARCH_AARCH64)
/* AArch64 variant of do_ldrex: ldxr/stxr pair with a forced context switch in
 * between. Returns SUCCESS when the stxr fails (monitor cleared), FAILURE
 * otherwise. */
static int do_ldrex(void)
{
    seL4_Word dummy1, dummy2, result;

    /* We don't really care where we are loading from here. This is just to set
     * the exclusive access tag.
     */
    asm volatile("ldxr %[rt], [%[rn]]"
                 : [rt]"=&r"(dummy1)
                 : [rn]"r"(&dummy2));

    /* Force a context switch to our parent. */
    seL4_Signal(shared_endpoint);

    /* Again, we don't care where we are storing to. This is to see whether the
     * exclusive access tag is still set.
     * NOTE(review): the "\t\n" escape sequence is reversed (conventionally
     * "\n\t") but still assembles; the [rd] operand name is declared but the
     * template refers to it positionally as %x0/%w0. Both are harmless as
     * written. The leading mov zeroes result before stxr writes its status.
     */
    asm volatile("mov %x0, #0\t\n"
                 "stxr %w0, %[rt], [%[rn]]"
                 : [rd]"=&r"(result)
                 : [rt]"r"(dummy2), [rn]"r"(&dummy1));

    /* The stxr should have failed (and returned 1) because the context switch
     * should have cleared the exclusive access tag.
     */
    return result == 0 ? FAILURE : SUCCESS;
}
275#else
276#error "Unsupported architecture"
277#endif
278
279/* Prior to kernel changeset a4656bf3066e the load-exclusive monitor was not
280 * cleared on a context switch. This causes unexpected and incorrect behaviour
281 * for any userspace program relying on ldrex/strex to implement exclusion
282 * mechanisms. This test checks that the monitor is cleared correctly on
283 * switch. See SELFOUR-141 for more information.
284 */
285int test_ldrex_cleared(env_t env)
286{
287    helper_thread_t thread;
288    seL4_Word result;
289    seL4_Word badge = 0; /* ignored */
290
291    /* Create a child to perform the ldrex/strex. */
292    create_helper_thread(env, &thread);
293    shared_endpoint = thread.local_endpoint.cptr;
294    start_helper(env, &thread, (helper_fn_t) do_ldrex, 0, 0, 0, 0);
295
296    /* Wait for the child to do ldrex and signal us. */
297    seL4_Wait(shared_endpoint, &badge);
298
299    /* Wait for the child to do strex and exit. */
300    result = wait_for_helper(&thread);
301
302    cleanup_helper(env, &thread);
303
304    return result;
305}
306DEFINE_TEST(REGRESSIONS0002, "Test the load-exclusive monitor is cleared on context switch", test_ldrex_cleared, true)
307#endif
308
309#if defined(CONFIG_ARCH_IA32)
/* Set by the helper once it has observed itself running at CPL0. */
static volatile int got_cpl = 0;
/* User-level stack pointer the helper switches to after dropping privilege. */
static volatile uintptr_t stack_after_cpl = 0;
/* XOR hash computed over kernel memory while (erroneously) at CPL0. */
static volatile uint32_t kernel_hash;
/* Landing point after the helper drops privilege via iret in do_wait_for_cpl.
 * Reached only if the CPL0 exploit succeeded, so it records a test failure
 * and then halts. */
void VISIBLE do_after_cpl_change(void)
{
    printf("XOR hash for first MB of kernel region 0x%x\n", kernel_hash);
    test_check(false);
    /* we don't have a stack to pop back up to message the test parent,
     * but we can just fault, the result is that the test output
     * will have a 'spurious' invalid instruction error, too bad */
    asm volatile("hlt");
}
/* Helper body for REGRESSIONS0003: spins in seL4_Yield() waiting to be
 * resumed at CPL0 by the parent's WriteRegisters manipulation. If it ever
 * finds itself at CPL0 it demonstrates the privilege (cli, hashing kernel
 * memory), then drops back to user mode via a hand-built iret frame so it can
 * report the failure. */
static int do_wait_for_cpl(void)
{
    /* check our current CPL */
    uint16_t cs;
    asm volatile("mov %%cs, %0" : "=r"(cs));
    /* Low two bits of CS are the current privilege level; 0 == kernel. */
    if ((cs & 3) == 0) {
        got_cpl = 1;
        /* prove we have root by doing something only the kernel can do */
        /* like disabling interrupts */
        asm volatile("cli");
        /* let's hash a meg of kernel code */
        /* NOTE(review): 0xe0000000 is presumably the start of the kernel
         * window on ia32 — confirm against the kernel's virtual memory
         * layout. */
        int i;
        uint32_t *kernel = (uint32_t *)0xe0000000;
        for (i = 0; i < BIT(20) / sizeof(uint32_t); i++) {
            kernel_hash ^= kernel[i];
        }
        /* take away our privileges (and put interrupts back on) by constructing
         * an iret. we need to lose root so that we can call the kernel again. We
         * also need to stop using the kernel stack.
         * Frame pushed (top to bottom at iret): EIP, CS, EFLAGS (with IF set
         * via the orl $0x200), user ESP, SS. 0x23/0x1b are presumably the
         * user data/code selectors (RPL 3) in the kernel's GDT — confirm. */
        asm volatile(
            "andl $0xFFFFFFE0, %%esp\n"
            "push %[SS] \n"
            "push %[STACK] \n"
            "pushf \n"
            "orl $0x200,(%%esp) \n"
            "push %[CS] \n"
            "push $do_after_cpl_change\n"
            "iret\n"
            :
            : [SS]"r"(0x23),
            [CS]"r"(0x1b),
            [STACK]"r"(stack_after_cpl));
        /* this is unreachable */
    }
    while (1) {
        /* Sit here calling the kernel to maximize the chance that when the
         * the timer interrupt finally fires it will actually happen when
         * we are inside the kernel, this will result in the exception being
         * delayed until we switch back to user mode */
        seL4_Yield();
    }
    return 0;
}
365
/* Regression test for a return-to-user-with-CPL0 exploit: repeatedly rewrites
 * a low-priority helper's saved EIP, trying to catch it in the window where
 * its trap frame holds kernel-privilege CS/SS (see the comment in the loop).
 * If the helper ever runs at CPL0, do_after_cpl_change records the failure. */
int test_no_ret_with_cpl0(env_t env)
{
    helper_thread_t thread;
    int error;

    /* start a low priority helper thread that we will attempt to change the CPL of */
    create_helper_thread(env, &thread);
    start_helper(env, &thread, (helper_fn_t) do_wait_for_cpl, 0, 0, 0, 0);
    stack_after_cpl = (uintptr_t)get_helper_initial_stack_pointer(&thread);

    /* Bounded number of attempts; each sleep lets the timer interrupt land
     * while the helper is (hopefully) inside a kernel invocation. */
    for (int i = 0; i < 20; i++) {
        sel4test_sleep(env, NS_IN_S / 10);
        if (got_cpl) {
            wait_for_helper(&thread);
            break;
        }
        /* reset the helper threads registers */
        seL4_UserContext context;
        error = seL4_TCB_ReadRegisters(get_helper_tcb(&thread), false, 0, sizeof(seL4_UserContext) / sizeof(seL4_Word),
                                       &context);
        test_eq(error, 0);
        context.eip = (seL4_Word)do_wait_for_cpl;
        /* If all went well in the helper thread then the interrupt came in
         * whilst it was performing a kernel invocation. This means the interrupt
         * would have been masked until it performed a 'sysexit' to return to user.
         * Should an interrupt occur right then, however, the trap frame that is
         * constructed is to the 'sysexit' instruction, and the stored CS and SS
         * are CPL0 (kernel privilege). Kernel privilege is needed because once this
         * thread is resumed (via iret) we will resume at the sysexit (and hence will
         * need kernel privilege), then the sysexit will happen forcibly removing
         * kernel privilege.
         * Right now, however, the interrupt has occurred and we have woken up. The
         * below call to WriteRegisters will overwrite the return address (which
         * was going to be sysexit) to our own function, which will then be running
         * at CPL0 */
        error = seL4_TCB_WriteRegisters(get_helper_tcb(&thread), true, 0, sizeof(seL4_UserContext) / sizeof(seL4_Word),
                                        &context);
        test_eq(error, 0);
    }

    cleanup_helper(env, &thread);

    return sel4test_get_result();
}
DEFINE_TEST(REGRESSIONS0003, "Test return to user with CPL0 exploit", test_no_ret_with_cpl0,
            config_set(CONFIG_HAVE_TIMER))
412#endif /* defined(CONFIG_ARCH_IA32) */
413