1/*
2 * Copyright 2014, General Dynamics C4 Systems
3 *
4 * This software may be distributed and modified according to the terms of
5 * the GNU General Public License version 2. Note that NO WARRANTY is provided.
6 * See "LICENSE_GPLv2.txt" for details.
7 *
8 * @TAG(GD_GPL)
9 */
10
11#include <config.h>
12#include <types.h>
13#include <api/failures.h>
14#include <api/invocation.h>
15#include <api/syscall.h>
16#include <api/shared_types.h>
17#include <machine/io.h>
18#include <object/structures.h>
19#include <object/objecttype.h>
20#include <object/cnode.h>
21#include <object/tcb.h>
22#include <kernel/cspace.h>
23#include <kernel/thread.h>
24#include <kernel/vspace.h>
25#include <model/statedata.h>
26#include <util.h>
27#include <string.h>
28#include <stdint.h>
29#include <arch/smp/ipi_inline.h>
30
31#define NULL_PRIO 0
32
33static exception_t
34checkPrio(prio_t prio, tcb_t *auth)
35{
36    prio_t mcp;
37
38    mcp = auth->tcbMCP;
39
40    /* system invariant: existing MCPs are bounded */
41    assert(mcp <= seL4_MaxPrio);
42
43    /* can't assign a priority greater than our own mcp */
44    if (prio > mcp) {
45        current_syscall_error.type = seL4_RangeError;
46        current_syscall_error.rangeErrorMin = seL4_MinPrio;
47        current_syscall_error.rangeErrorMax = mcp;
48        return EXCEPTION_SYSCALL_ERROR;
49    }
50
51    return EXCEPTION_NONE;
52}
53
54static inline void
55addToBitmap(word_t cpu, word_t dom, word_t prio)
56{
57    word_t l1index;
58    word_t l1index_inverted;
59
60    l1index = prio_to_l1index(prio);
61    l1index_inverted = invert_l1index(l1index);
62
63    NODE_STATE_ON_CORE(ksReadyQueuesL1Bitmap[dom], cpu) |= BIT(l1index);
64    /* we invert the l1 index when accessed the 2nd level of the bitmap in
65       order to increase the liklihood that high prio threads l2 index word will
66       be on the same cache line as the l1 index word - this makes sure the
67       fastpath is fastest for high prio threads */
68    NODE_STATE_ON_CORE(ksReadyQueuesL2Bitmap[dom][l1index_inverted], cpu) |= BIT(prio & MASK(wordRadix));
69}
70
71static inline void
72removeFromBitmap(word_t cpu, word_t dom, word_t prio)
73{
74    word_t l1index;
75    word_t l1index_inverted;
76
77    l1index = prio_to_l1index(prio);
78    l1index_inverted = invert_l1index(l1index);
79    NODE_STATE_ON_CORE(ksReadyQueuesL2Bitmap[dom][l1index_inverted], cpu) &= ~BIT(prio & MASK(wordRadix));
80    if (unlikely(!NODE_STATE_ON_CORE(ksReadyQueuesL2Bitmap[dom][l1index_inverted], cpu))) {
81        NODE_STATE_ON_CORE(ksReadyQueuesL1Bitmap[dom], cpu) &= ~BIT(l1index);
82    }
83}
84
85/* Add TCB to the head of a scheduler queue */
86void
87tcbSchedEnqueue(tcb_t *tcb)
88{
89    if (!thread_state_get_tcbQueued(tcb->tcbState)) {
90        tcb_queue_t queue;
91        dom_t dom;
92        prio_t prio;
93        word_t idx;
94
95        dom = tcb->tcbDomain;
96        prio = tcb->tcbPriority;
97        idx = ready_queues_index(dom, prio);
98        queue = NODE_STATE_ON_CORE(ksReadyQueues[idx], tcb->tcbAffinity);
99
100        if (!queue.end) { /* Empty list */
101            queue.end = tcb;
102            addToBitmap(SMP_TERNARY(tcb->tcbAffinity, 0), dom, prio);
103        } else {
104            queue.head->tcbSchedPrev = tcb;
105        }
106        tcb->tcbSchedPrev = NULL;
107        tcb->tcbSchedNext = queue.head;
108        queue.head = tcb;
109
110        NODE_STATE_ON_CORE(ksReadyQueues[idx], tcb->tcbAffinity) = queue;
111
112        thread_state_ptr_set_tcbQueued(&tcb->tcbState, true);
113    }
114}
115
116/* Add TCB to the end of a scheduler queue */
117void
118tcbSchedAppend(tcb_t *tcb)
119{
120    if (!thread_state_get_tcbQueued(tcb->tcbState)) {
121        tcb_queue_t queue;
122        dom_t dom;
123        prio_t prio;
124        word_t idx;
125
126        dom = tcb->tcbDomain;
127        prio = tcb->tcbPriority;
128        idx = ready_queues_index(dom, prio);
129        queue = NODE_STATE_ON_CORE(ksReadyQueues[idx], tcb->tcbAffinity);
130
131        if (!queue.head) { /* Empty list */
132            queue.head = tcb;
133            addToBitmap(SMP_TERNARY(tcb->tcbAffinity, 0), dom, prio);
134        } else {
135            queue.end->tcbSchedNext = tcb;
136        }
137        tcb->tcbSchedPrev = queue.end;
138        tcb->tcbSchedNext = NULL;
139        queue.end = tcb;
140
141        NODE_STATE_ON_CORE(ksReadyQueues[idx], tcb->tcbAffinity) = queue;
142
143        thread_state_ptr_set_tcbQueued(&tcb->tcbState, true);
144    }
145}
146
147/* Remove TCB from a scheduler queue */
148void
149tcbSchedDequeue(tcb_t *tcb)
150{
151    if (thread_state_get_tcbQueued(tcb->tcbState)) {
152        tcb_queue_t queue;
153        dom_t dom;
154        prio_t prio;
155        word_t idx;
156
157        dom = tcb->tcbDomain;
158        prio = tcb->tcbPriority;
159        idx = ready_queues_index(dom, prio);
160        queue = NODE_STATE_ON_CORE(ksReadyQueues[idx], tcb->tcbAffinity);
161
162        if (tcb->tcbSchedPrev) {
163            tcb->tcbSchedPrev->tcbSchedNext = tcb->tcbSchedNext;
164        } else {
165            queue.head = tcb->tcbSchedNext;
166            if (likely(!tcb->tcbSchedNext)) {
167                removeFromBitmap(SMP_TERNARY(tcb->tcbAffinity, 0), dom, prio);
168            }
169        }
170
171        if (tcb->tcbSchedNext) {
172            tcb->tcbSchedNext->tcbSchedPrev = tcb->tcbSchedPrev;
173        } else {
174            queue.end = tcb->tcbSchedPrev;
175        }
176
177        NODE_STATE_ON_CORE(ksReadyQueues[idx], tcb->tcbAffinity) = queue;
178
179        thread_state_ptr_set_tcbQueued(&tcb->tcbState, false);
180    }
181}
182
183#ifdef CONFIG_DEBUG_BUILD
184void tcbDebugAppend(tcb_t *tcb)
185{
186    /* prepend to the list */
187    tcb->tcbDebugPrev = NULL;
188
189    tcb->tcbDebugNext = NODE_STATE_ON_CORE(ksDebugTCBs, tcb->tcbAffinity);
190
191    if (NODE_STATE_ON_CORE(ksDebugTCBs, tcb->tcbAffinity)) {
192        NODE_STATE_ON_CORE(ksDebugTCBs, tcb->tcbAffinity)->tcbDebugPrev = tcb;
193    }
194
195    NODE_STATE_ON_CORE(ksDebugTCBs, tcb->tcbAffinity) = tcb;
196}
197
198void tcbDebugRemove(tcb_t *tcb)
199{
200    assert(NODE_STATE_ON_CORE(ksDebugTCBs, tcb->tcbAffinity) != NULL);
201    if (tcb == NODE_STATE_ON_CORE(ksDebugTCBs, tcb->tcbAffinity)) {
202        NODE_STATE_ON_CORE(ksDebugTCBs, tcb->tcbAffinity) = NODE_STATE_ON_CORE(ksDebugTCBs, tcb->tcbAffinity)->tcbDebugNext;
203    } else {
204        assert(tcb->tcbDebugPrev);
205        tcb->tcbDebugPrev->tcbDebugNext = tcb->tcbDebugNext;
206    }
207
208    if (tcb->tcbDebugNext) {
209        tcb->tcbDebugNext->tcbDebugPrev = tcb->tcbDebugPrev;
210    }
211
212    tcb->tcbDebugPrev = NULL;
213    tcb->tcbDebugNext = NULL;
214}
215#endif /* CONFIG_DEBUG_BUILD */
216
217/* Add TCB to the end of an endpoint queue */
218tcb_queue_t
219tcbEPAppend(tcb_t *tcb, tcb_queue_t queue)
220{
221    if (!queue.head) { /* Empty list */
222        queue.head = tcb;
223    } else {
224        queue.end->tcbEPNext = tcb;
225    }
226    tcb->tcbEPPrev = queue.end;
227    tcb->tcbEPNext = NULL;
228    queue.end = tcb;
229
230    return queue;
231}
232
233/* Remove TCB from an endpoint queue */
234tcb_queue_t
235tcbEPDequeue(tcb_t *tcb, tcb_queue_t queue)
236{
237    if (tcb->tcbEPPrev) {
238        tcb->tcbEPPrev->tcbEPNext = tcb->tcbEPNext;
239    } else {
240        queue.head = tcb->tcbEPNext;
241    }
242
243    if (tcb->tcbEPNext) {
244        tcb->tcbEPNext->tcbEPPrev = tcb->tcbEPPrev;
245    } else {
246        queue.end = tcb->tcbEPPrev;
247    }
248
249    return queue;
250}
251
252cptr_t PURE
253getExtraCPtr(word_t *bufferPtr, word_t i)
254{
255    return (cptr_t)bufferPtr[seL4_MsgMaxLength + 2 + i];
256}
257
258void
259setExtraBadge(word_t *bufferPtr, word_t badge,
260              word_t i)
261{
262    bufferPtr[seL4_MsgMaxLength + 2 + i] = badge;
263}
264
/* Set up a single-use reply path from receiver back to sender after a
 * Call: block the sender on the reply, then derive a (non-master) reply
 * cap from the sender's master reply cap into the receiver's caller
 * slot.  The asserts mirror error cases in the Haskell specification. */
void
setupCallerCap(tcb_t *sender, tcb_t *receiver)
{
    cte_t *replySlot, *callerSlot;
    cap_t masterCap UNUSED, callerCap UNUSED;

    setThreadState(sender, ThreadState_BlockedOnReply);
    replySlot = TCB_PTR_CTE_PTR(sender, tcbReply);
    masterCap = replySlot->cap;
    /* Haskell error: "Sender must have a valid master reply cap" */
    assert(cap_get_capType(masterCap) == cap_reply_cap);
    assert(cap_reply_cap_get_capReplyMaster(masterCap));
    assert(TCB_PTR(cap_reply_cap_get_capTCBPtr(masterCap)) == sender);
    callerSlot = TCB_PTR_CTE_PTR(receiver, tcbCaller);
    callerCap = callerSlot->cap;
    /* Haskell error: "Caller cap must not already exist" */
    assert(cap_get_capType(callerCap) == cap_null_cap);
    /* Insert a fresh non-master reply cap to the sender into the
     * receiver's caller slot, derived from the sender's reply slot. */
    cteInsert(cap_reply_cap_new(false, TCB_REF(sender)),
              replySlot, callerSlot);
}
285
/* Delete the receiver's caller capability (if present), discarding its
 * ability to reply to the last Call it received. */
void
deleteCallerCap(tcb_t *receiver)
{
    cte_t *callerSlot;

    callerSlot = TCB_PTR_CTE_PTR(receiver, tcbCaller);
    /** GHOSTUPD: "(True, gs_set_assn cteDeleteOne_'proc (ucast cap_reply_cap))" */
    cteDeleteOne(callerSlot);
}
295
/* CTE slots of the extra caps looked up for the message currently being
 * decoded; lookupExtraCaps() NULL-terminates the array when fewer than
 * seL4_MsgMaxExtraCaps entries are filled. */
extra_caps_t current_extra_caps;
297
298exception_t
299lookupExtraCaps(tcb_t* thread, word_t *bufferPtr, seL4_MessageInfo_t info)
300{
301    lookupSlot_raw_ret_t lu_ret;
302    cptr_t cptr;
303    word_t i, length;
304
305    if (!bufferPtr) {
306        current_extra_caps.excaprefs[0] = NULL;
307        return EXCEPTION_NONE;
308    }
309
310    length = seL4_MessageInfo_get_extraCaps(info);
311
312    for (i = 0; i < length; i++) {
313        cptr = getExtraCPtr(bufferPtr, i);
314
315        lu_ret = lookupSlot(thread, cptr);
316        if (lu_ret.status != EXCEPTION_NONE) {
317            current_fault = seL4_Fault_CapFault_new(cptr, false);
318            return lu_ret.status;
319        }
320
321        current_extra_caps.excaprefs[i] = lu_ret.slot;
322    }
323    if (i < seL4_MsgMaxExtraCaps) {
324        current_extra_caps.excaprefs[i] = NULL;
325    }
326
327    return EXCEPTION_NONE;
328}
329
330/* Copy IPC MRs from one thread to another */
331word_t
332copyMRs(tcb_t *sender, word_t *sendBuf, tcb_t *receiver,
333        word_t *recvBuf, word_t n)
334{
335    word_t i;
336
337    /* Copy inline words */
338    for (i = 0; i < n && i < n_msgRegisters; i++) {
339        setRegister(receiver, msgRegisters[i],
340                    getRegister(sender, msgRegisters[i]));
341    }
342
343    if (!recvBuf || !sendBuf) {
344        return i;
345    }
346
347    /* Copy out-of-line words */
348    for (; i < n; i++) {
349        recvBuf[i + 1] = sendBuf[i + 1];
350    }
351
352    return i;
353}
354
355#ifdef ENABLE_SMP_SUPPORT
/* Check whether the latest ready-queue update invalidates the scheduling
 * decision currently in force on `tcb`'s (remote) core; if so, mark that
 * core for an `irq_reschedule_ipi`.  Only applies when the target thread
 * is in the currently scheduled domain. */
void
remoteQueueUpdate(tcb_t *tcb)
{
    /* only ipi if the target is for the current domain */
    if (tcb->tcbAffinity != getCurrentCPUIndex() && tcb->tcbDomain == ksCurDomain) {
        tcb_t *targetCurThread = NODE_STATE_ON_CORE(ksCurThread, tcb->tcbAffinity);

        /* reschedule if the target core is idle or we are waking a higher priority thread */
        if (targetCurThread == NODE_STATE_ON_CORE(ksIdleThread, tcb->tcbAffinity)  ||
                tcb->tcbPriority > targetCurThread->tcbPriority) {
            ARCH_NODE_STATE(ipiReschedulePending) |= BIT(tcb->tcbAffinity);
        }
    }
}
372
/* Make sure `tcb` is not executing on another core: if it is that core's
 * current thread, issue an IpiRemoteCall_Stall to pull the core off the
 * TCB, and mark the core for an `irq_reschedule_ipi` so its scheduling
 * state is restored afterwards. */
void
remoteTCBStall(tcb_t *tcb)
{

    if (tcb->tcbAffinity != getCurrentCPUIndex() &&
            NODE_STATE_ON_CORE(ksCurThread, tcb->tcbAffinity) == tcb) {
        doRemoteStall(tcb->tcbAffinity);
        ARCH_NODE_STATE(ipiReschedulePending) |= BIT(tcb->tcbAffinity);
    }
}
386
/* Migrate `thread` to core `affinity`: dequeue it from any ready queue,
 * perform the migration, and re-queue it on the destination core if it
 * is runnable.  Always returns EXCEPTION_NONE. */
static exception_t
invokeTCB_SetAffinity(tcb_t *thread, word_t affinity)
{
    /* remove the tcb from scheduler queue in case it is already in one
     * and add it to new queue if required */
    tcbSchedDequeue(thread);
    migrateTCB(thread, affinity);
    if (isRunnable(thread)) {
        SCHED_APPEND(thread);
    }
    /* reschedule current cpu if tcb moves itself */
    if (thread == NODE_STATE(ksCurThread)) {
        rescheduleRequired();
    }
    return EXCEPTION_NONE;
}
403
404static exception_t
405decodeSetAffinity(cap_t cap, word_t length, word_t *buffer)
406{
407    tcb_t *tcb;
408    word_t affinity;
409
410    if (length < 1) {
411        userError("TCB SetAffinity: Truncated message.");
412        current_syscall_error.type = seL4_TruncatedMessage;
413        return EXCEPTION_SYSCALL_ERROR;
414    }
415
416    tcb = TCB_PTR(cap_thread_cap_get_capTCBPtr(cap));
417
418    affinity = getSyscallArg(0, buffer);
419    if (affinity >= ksNumCPUs) {
420        userError("TCB SetAffinity: Requested CPU does not exist.");
421        current_syscall_error.type = seL4_IllegalOperation;
422        return EXCEPTION_SYSCALL_ERROR;
423    }
424
425    setThreadState(NODE_STATE(ksCurThread), ThreadState_Restart);
426    return invokeTCB_SetAffinity(tcb, affinity);
427}
428#endif /* ENABLE_SMP_SUPPORT */
429
430#ifdef CONFIG_HARDWARE_DEBUG_API
431static exception_t
432invokeConfigureSingleStepping(word_t *buffer, tcb_t *t,
433                              uint16_t bp_num, word_t n_instrs)
434{
435    bool_t bp_was_consumed;
436
437    bp_was_consumed = configureSingleStepping(t, bp_num, n_instrs, false);
438    if (n_instrs == 0) {
439        unsetBreakpointUsedFlag(t, bp_num);
440        setMR(NODE_STATE(ksCurThread), buffer, 0, false);
441    } else {
442        setBreakpointUsedFlag(t, bp_num);
443        setMR(NODE_STATE(ksCurThread), buffer, 0, bp_was_consumed);
444    }
445    return EXCEPTION_NONE;
446}
447
448static exception_t
449decodeConfigureSingleStepping(cap_t cap, word_t *buffer)
450{
451    uint16_t bp_num;
452    word_t n_instrs;
453    tcb_t *tcb;
454    syscall_error_t syserr;
455
456    tcb = TCB_PTR(cap_thread_cap_get_capTCBPtr(cap));
457
458    bp_num = getSyscallArg(0, buffer);
459    n_instrs = getSyscallArg(1, buffer);
460
461    syserr = Arch_decodeConfigureSingleStepping(tcb, bp_num, n_instrs, false);
462    if (syserr.type != seL4_NoError) {
463        current_syscall_error = syserr;
464        return EXCEPTION_SYSCALL_ERROR;
465    }
466
467    setThreadState(NODE_STATE(ksCurThread), ThreadState_Restart);
468    return invokeConfigureSingleStepping(buffer, tcb, bp_num, n_instrs);
469}
470
/* Program hardware breakpoint/watchpoint `bp_num` on `tcb` and mark the
 * slot as in use.  Always returns EXCEPTION_NONE; all validation happens
 * in decodeSetBreakpoint(). */
static exception_t
invokeSetBreakpoint(tcb_t *tcb, uint16_t bp_num,
                    word_t vaddr, word_t type, word_t size, word_t rw)
{
    setBreakpoint(tcb, bp_num, vaddr, type, size, rw);
    /* Signal restore_user_context() to pop the breakpoint context on return. */
    setBreakpointUsedFlag(tcb, bp_num);
    return EXCEPTION_NONE;
}
480
/* Decode a TCB_SetBreakpoint invocation.  Validates, in order: the
 * target address is in userspace, the breakpoint type, the type-specific
 * constraints on size/rw/bp_num, the access type, the size, and the
 * alignment - then defers to the architecture layer before invoking. */
static exception_t
decodeSetBreakpoint(cap_t cap, word_t *buffer)
{
    uint16_t bp_num;
    word_t vaddr, type, size, rw;
    tcb_t *tcb;
    syscall_error_t error;

    tcb = TCB_PTR(cap_thread_cap_get_capTCBPtr(cap));
    bp_num = getSyscallArg(0, buffer);
    vaddr = getSyscallArg(1, buffer);
    type = getSyscallArg(2, buffer);
    size = getSyscallArg(3, buffer);
    rw = getSyscallArg(4, buffer);

    /* We disallow the user to set breakpoint addresses that are in the kernel
     * vaddr range.
     */
    if (vaddr >= (word_t)kernelBase) {
        userError("Debug: Invalid address %lx: bp addresses must be userspace "
                  "addresses.",
                  vaddr);
        current_syscall_error.type = seL4_InvalidArgument;
        current_syscall_error.invalidArgumentNumber = 1;
        return EXCEPTION_SYSCALL_ERROR;
    }

    if (type != seL4_InstructionBreakpoint && type != seL4_DataBreakpoint) {
        userError("Debug: Unknown breakpoint type %lx.", type);
        current_syscall_error.type = seL4_InvalidArgument;
        current_syscall_error.invalidArgumentNumber = 2;
        return EXCEPTION_SYSCALL_ERROR;
    } else if (type == seL4_InstructionBreakpoint) {
        /* Instruction breakpoints: no size, read-only trigger, and the
         * slot must be a breakpoint (not watchpoint) register. */
        if (size != 0) {
            userError("Debug: Instruction bps must have size of 0.");
            current_syscall_error.type = seL4_InvalidArgument;
            current_syscall_error.invalidArgumentNumber = 3;
            return EXCEPTION_SYSCALL_ERROR;
        }
        if (rw != seL4_BreakOnRead) {
            userError("Debug: Instruction bps must be break-on-read.");
            current_syscall_error.type = seL4_InvalidArgument;
            current_syscall_error.invalidArgumentNumber = 4;
            return EXCEPTION_SYSCALL_ERROR;
        }
        if (bp_num >= seL4_FirstWatchpoint
                && seL4_FirstBreakpoint != seL4_FirstWatchpoint) {
            userError("Debug: Can't specify a watchpoint ID with type seL4_InstructionBreakpoint.");
            current_syscall_error.type = seL4_InvalidArgument;
            current_syscall_error.invalidArgumentNumber = 2;
            return EXCEPTION_SYSCALL_ERROR;
        }
    } else if (type == seL4_DataBreakpoint) {
        /* Data watchpoints: non-zero size and a watchpoint-range slot. */
        if (size == 0) {
            userError("Debug: Data bps cannot have size of 0.");
            current_syscall_error.type = seL4_InvalidArgument;
            current_syscall_error.invalidArgumentNumber = 3;
            return EXCEPTION_SYSCALL_ERROR;
        }
        if (bp_num < seL4_FirstWatchpoint) {
            userError("Debug: Data watchpoints cannot specify non-data watchpoint ID.");
            current_syscall_error.type = seL4_InvalidArgument;
            current_syscall_error.invalidArgumentNumber = 2;
            return EXCEPTION_SYSCALL_ERROR;
        }
    } else if (type == seL4_SoftwareBreakRequest) {
        /* NOTE(review): this branch is unreachable - the first check in
         * this chain already rejects everything that is neither an
         * instruction nor a data breakpoint.  Kept for parity with the
         * upstream code. */
        userError("Debug: Use a software breakpoint instruction to trigger a "
                  "software breakpoint.");
        current_syscall_error.type = seL4_InvalidArgument;
        current_syscall_error.invalidArgumentNumber = 2;
        return EXCEPTION_SYSCALL_ERROR;
    }

    if (rw != seL4_BreakOnRead && rw != seL4_BreakOnWrite
            && rw != seL4_BreakOnReadWrite) {
        userError("Debug: Unknown access-type %lu.", rw);
        current_syscall_error.type = seL4_InvalidArgument;
        current_syscall_error.invalidArgumentNumber = 3;
        return EXCEPTION_SYSCALL_ERROR;
    }
    if (size != 0 && size != 1 && size != 2 && size != 4 && size != 8) {
        userError("Debug: Invalid size %lu.", size);
        current_syscall_error.type = seL4_InvalidArgument;
        current_syscall_error.invalidArgumentNumber = 3;
        return EXCEPTION_SYSCALL_ERROR;
    }
    if (size > 0 && vaddr & (size - 1)) {
        /* Just Don't allow unaligned watchpoints. They are undefined
         * both ARM and x86.
         *
         * X86: Intel manuals, vol3, 17.2.5:
         *  "Two-byte ranges must be aligned on word boundaries; 4-byte
         *   ranges must be aligned on doubleword boundaries"
         *  "Unaligned data or I/O breakpoint addresses do not yield valid
         *   results"
         *
         * ARM: ARMv7 manual, C11.11.44:
         *  "A DBGWVR is programmed with a word-aligned address."
         */
        userError("Debug: Unaligned data watchpoint address %lx (size %lx) "
                  "rejected.\n",
                  vaddr, size);

        current_syscall_error.type = seL4_AlignmentError;
        return EXCEPTION_SYSCALL_ERROR;
    }

    error = Arch_decodeSetBreakpoint(tcb, bp_num, vaddr, type, size, rw);
    if (error.type != seL4_NoError) {
        current_syscall_error = error;
        return EXCEPTION_SYSCALL_ERROR;
    }

    setThreadState(NODE_STATE(ksCurThread), ThreadState_Restart);
    return invokeSetBreakpoint(tcb, bp_num,
                               vaddr, type, size, rw);
}
598
599static exception_t
600invokeGetBreakpoint(word_t *buffer, tcb_t *tcb, uint16_t bp_num)
601{
602    getBreakpoint_t res;
603
604    res = getBreakpoint(tcb, bp_num);
605    setMR(NODE_STATE(ksCurThread), buffer, 0, res.vaddr);
606    setMR(NODE_STATE(ksCurThread), buffer, 1, res.type);
607    setMR(NODE_STATE(ksCurThread), buffer, 2, res.size);
608    setMR(NODE_STATE(ksCurThread), buffer, 3, res.rw);
609    setMR(NODE_STATE(ksCurThread), buffer, 4, res.is_enabled);
610    return EXCEPTION_NONE;
611}
612
613static exception_t
614decodeGetBreakpoint(cap_t cap, word_t *buffer)
615{
616    tcb_t *tcb;
617    uint16_t bp_num;
618    syscall_error_t error;
619
620    tcb = TCB_PTR(cap_thread_cap_get_capTCBPtr(cap));
621    bp_num = getSyscallArg(0, buffer);
622
623    error = Arch_decodeGetBreakpoint(tcb, bp_num);
624    if (error.type != seL4_NoError) {
625        current_syscall_error = error;
626        return EXCEPTION_SYSCALL_ERROR;
627    }
628
629    setThreadState(NODE_STATE(ksCurThread), ThreadState_Restart);
630    return invokeGetBreakpoint(buffer, tcb, bp_num);
631}
632
/* Disable breakpoint `bp_num` on `tcb` and release its slot.  Always
 * returns EXCEPTION_NONE. */
static exception_t
invokeUnsetBreakpoint(tcb_t *tcb, uint16_t bp_num)
{
    /* Maintain the bitfield of in-use breakpoints. */
    unsetBreakpoint(tcb, bp_num);
    unsetBreakpointUsedFlag(tcb, bp_num);
    return EXCEPTION_NONE;
}
641
642static exception_t
643decodeUnsetBreakpoint(cap_t cap, word_t *buffer)
644{
645    tcb_t *tcb;
646    uint16_t bp_num;
647    syscall_error_t error;
648
649    tcb = TCB_PTR(cap_thread_cap_get_capTCBPtr(cap));
650    bp_num = getSyscallArg(0, buffer);
651
652    error = Arch_decodeUnsetBreakpoint(tcb, bp_num);
653    if (error.type != seL4_NoError) {
654        current_syscall_error = error;
655        return EXCEPTION_SYSCALL_ERROR;
656    }
657
658    setThreadState(NODE_STATE(ksCurThread), ThreadState_Restart);
659    return invokeUnsetBreakpoint(tcb, bp_num);
660}
661#endif /* CONFIG_HARDWARE_DEBUG_API */
662
663static exception_t
664invokeSetTLSBase(tcb_t *thread, word_t tls_base)
665{
666    setRegister(thread, TLS_BASE, tls_base);
667    if (thread == NODE_STATE(ksCurThread)) {
668        /* If this is the current thread force a reschedule to ensure that any changes
669         * to the TLS_BASE are realized */
670        rescheduleRequired();
671    }
672
673    return EXCEPTION_NONE;
674}
675
676static exception_t
677decodeSetTLSBase(cap_t cap, word_t length, word_t *buffer)
678{
679    word_t tls_base;
680
681    if (length < 1) {
682        userError("TCB SetTLSBase: Truncated message.");
683        current_syscall_error.type = seL4_TruncatedMessage;
684        return EXCEPTION_SYSCALL_ERROR;
685    }
686
687    tls_base = getSyscallArg(0, buffer);
688
689    setThreadState(NODE_STATE(ksCurThread), ThreadState_Restart);
690    return invokeSetTLSBase(TCB_PTR(cap_thread_cap_get_capTCBPtr(cap)), tls_base);
691}
692
/* The following functions sit in the syscall error monad, but include the
 * exception cases for the preemptible bottom end, as they call the invoke
 * functions directly.  This is a significant deviation from the Haskell
 * spec. */
/* Top-level decode for invocations on a thread cap: dispatches on the
 * invocation label to the per-operation decode functions; Suspend and
 * Resume take no arguments and so jump straight to their invokes. */
exception_t
decodeTCBInvocation(word_t invLabel, word_t length, cap_t cap,
                    cte_t* slot, extra_caps_t excaps, bool_t call,
                    word_t *buffer)
{
    /* Stall the core if we are operating on a remote TCB that is currently running */
    SMP_COND_STATEMENT(remoteTCBStall(TCB_PTR(cap_thread_cap_get_capTCBPtr(cap)));)

    switch (invLabel) {
    case TCBReadRegisters:
        /* Second level of decoding */
        return decodeReadRegisters(cap, length, call, buffer);

    case TCBWriteRegisters:
        return decodeWriteRegisters(cap, length, buffer);

    case TCBCopyRegisters:
        return decodeCopyRegisters(cap, length, excaps, buffer);

    case TCBSuspend:
        /* Jump straight to the invoke */
        setThreadState(NODE_STATE(ksCurThread), ThreadState_Restart);
        return invokeTCB_Suspend(
                   TCB_PTR(cap_thread_cap_get_capTCBPtr(cap)));

    case TCBResume:
        setThreadState(NODE_STATE(ksCurThread), ThreadState_Restart);
        return invokeTCB_Resume(
                   TCB_PTR(cap_thread_cap_get_capTCBPtr(cap)));

    case TCBConfigure:
        return decodeTCBConfigure(cap, length, slot, excaps, buffer);

    case TCBSetPriority:
        return decodeSetPriority(cap, length, excaps, buffer);

    case TCBSetMCPriority:
        return decodeSetMCPriority(cap, length, excaps, buffer);

    case TCBSetSchedParams:
        return decodeSetSchedParams(cap, length, excaps, buffer);

    case TCBSetIPCBuffer:
        return decodeSetIPCBuffer(cap, length, slot, excaps, buffer);

    case TCBSetSpace:
        return decodeSetSpace(cap, length, slot, excaps, buffer);

    case TCBBindNotification:
        return decodeBindNotification(cap, excaps);

    case TCBUnbindNotification:
        return decodeUnbindNotification(cap);

#ifdef ENABLE_SMP_SUPPORT
    case TCBSetAffinity:
        return decodeSetAffinity(cap, length, buffer);
#endif /* ENABLE_SMP_SUPPORT */

        /* There is no notion of arch specific TCB invocations so this needs to go here */
#ifdef CONFIG_VTX
    case TCBSetEPTRoot:
        return decodeSetEPTRoot(cap, excaps);
#endif

#ifdef CONFIG_HARDWARE_DEBUG_API
    case TCBConfigureSingleStepping:
        return decodeConfigureSingleStepping(cap, buffer);

    case TCBSetBreakpoint:
        return decodeSetBreakpoint(cap, buffer);

    case TCBGetBreakpoint:
        return decodeGetBreakpoint(cap, buffer);

    case TCBUnsetBreakpoint:
        return decodeUnsetBreakpoint(cap, buffer);
#endif

    case TCBSetTLSBase:
        return decodeSetTLSBase(cap, length, buffer);

    default:
        /* Haskell: "throw IllegalOperation" */
        userError("TCB: Illegal operation.");
        current_syscall_error.type = seL4_IllegalOperation;
        return EXCEPTION_SYSCALL_ERROR;
    }
}
786
/* Bit positions within the flags argument of TCB_CopyRegisters
 * (tested with BIT() in decodeCopyRegisters below). */
enum CopyRegistersFlags {
    CopyRegisters_suspendSource = 0,
    CopyRegisters_resumeTarget = 1,
    CopyRegisters_transferFrame = 2,
    CopyRegisters_transferInteger = 3
};
793
794exception_t
795decodeCopyRegisters(cap_t cap, word_t length,
796                    extra_caps_t excaps, word_t *buffer)
797{
798    word_t transferArch;
799    tcb_t *srcTCB;
800    cap_t source_cap;
801    word_t flags;
802
803    if (length < 1 || excaps.excaprefs[0] == NULL) {
804        userError("TCB CopyRegisters: Truncated message.");
805        current_syscall_error.type = seL4_TruncatedMessage;
806        return EXCEPTION_SYSCALL_ERROR;
807    }
808
809    flags = getSyscallArg(0, buffer);
810
811    transferArch = Arch_decodeTransfer(flags >> 8);
812
813    source_cap = excaps.excaprefs[0]->cap;
814
815    if (cap_get_capType(source_cap) == cap_thread_cap) {
816        srcTCB = TCB_PTR(cap_thread_cap_get_capTCBPtr(source_cap));
817    } else {
818        userError("TCB CopyRegisters: Invalid source TCB.");
819        current_syscall_error.type = seL4_InvalidCapability;
820        current_syscall_error.invalidCapNumber = 1;
821        return EXCEPTION_SYSCALL_ERROR;
822    }
823
824    setThreadState(NODE_STATE(ksCurThread), ThreadState_Restart);
825    return invokeTCB_CopyRegisters(
826               TCB_PTR(cap_thread_cap_get_capTCBPtr(cap)), srcTCB,
827               flags & BIT(CopyRegisters_suspendSource),
828               flags & BIT(CopyRegisters_resumeTarget),
829               flags & BIT(CopyRegisters_transferFrame),
830               flags & BIT(CopyRegisters_transferInteger),
831               transferArch);
832
833}
834
/* Bit positions within the flags argument of TCB_ReadRegisters. */
enum ReadRegistersFlags {
    ReadRegisters_suspend = 0
};
838
839exception_t
840decodeReadRegisters(cap_t cap, word_t length, bool_t call,
841                    word_t *buffer)
842{
843    word_t transferArch, flags, n;
844    tcb_t* thread;
845
846    if (length < 2) {
847        userError("TCB ReadRegisters: Truncated message.");
848        current_syscall_error.type = seL4_TruncatedMessage;
849        return EXCEPTION_SYSCALL_ERROR;
850    }
851
852    flags = getSyscallArg(0, buffer);
853    n     = getSyscallArg(1, buffer);
854
855    if (n < 1 || n > n_frameRegisters + n_gpRegisters) {
856        userError("TCB ReadRegisters: Attempted to read an invalid number of registers (%d).",
857                  (int)n);
858        current_syscall_error.type = seL4_RangeError;
859        current_syscall_error.rangeErrorMin = 1;
860        current_syscall_error.rangeErrorMax = n_frameRegisters +
861                                              n_gpRegisters;
862        return EXCEPTION_SYSCALL_ERROR;
863    }
864
865    transferArch = Arch_decodeTransfer(flags >> 8);
866
867    thread = TCB_PTR(cap_thread_cap_get_capTCBPtr(cap));
868    if (thread == NODE_STATE(ksCurThread)) {
869        userError("TCB ReadRegisters: Attempted to read our own registers.");
870        current_syscall_error.type = seL4_IllegalOperation;
871        return EXCEPTION_SYSCALL_ERROR;
872    }
873
874    setThreadState(NODE_STATE(ksCurThread), ThreadState_Restart);
875    return invokeTCB_ReadRegisters(
876               TCB_PTR(cap_thread_cap_get_capTCBPtr(cap)),
877               flags & BIT(ReadRegisters_suspend),
878               n, transferArch, call);
879}
880
/* Bit positions within the flags argument of TCB_WriteRegisters. */
enum WriteRegistersFlags {
    WriteRegisters_resume = 0
};
884
885exception_t
886decodeWriteRegisters(cap_t cap, word_t length, word_t *buffer)
887{
888    word_t flags, w;
889    word_t transferArch;
890    tcb_t* thread;
891
892    if (length < 2) {
893        userError("TCB WriteRegisters: Truncated message.");
894        current_syscall_error.type = seL4_TruncatedMessage;
895        return EXCEPTION_SYSCALL_ERROR;
896    }
897
898    flags = getSyscallArg(0, buffer);
899    w     = getSyscallArg(1, buffer);
900
901    if (length - 2 < w) {
902        userError("TCB WriteRegisters: Message too short for requested write size (%d/%d).",
903                  (int)(length - 2), (int)w);
904        current_syscall_error.type = seL4_TruncatedMessage;
905        return EXCEPTION_SYSCALL_ERROR;
906    }
907
908    transferArch = Arch_decodeTransfer(flags >> 8);
909
910    thread = TCB_PTR(cap_thread_cap_get_capTCBPtr(cap));
911    if (thread == NODE_STATE(ksCurThread)) {
912        userError("TCB WriteRegisters: Attempted to write our own registers.");
913        current_syscall_error.type = seL4_IllegalOperation;
914        return EXCEPTION_SYSCALL_ERROR;
915    }
916
917    setThreadState(NODE_STATE(ksCurThread), ThreadState_Restart);
918    return invokeTCB_WriteRegisters(thread,
919                                    flags & BIT(WriteRegisters_resume),
920                                    w, transferArch, buffer);
921}
922
923/* SetPriority, SetMCPriority, SetSchedParams, SetIPCBuffer and SetSpace are all
924 * specialisations of TCBConfigure. */
/* Decode a TCB Configure invocation: validate the four syscall arguments
 * (fault EP cptr, CSpace root data, VSpace root data, IPC buffer address)
 * and the three extra caps (new CSpace root, new VSpace root, frame cap
 * backing the IPC buffer), then hand over to invokeTCB_ThreadControl with
 * both the space and IPC-buffer update flags set.
 */
exception_t
decodeTCBConfigure(cap_t cap, word_t length, cte_t* slot,
                   extra_caps_t rootCaps, word_t *buffer)
{
    cte_t *bufferSlot, *cRootSlot, *vRootSlot;
    cap_t bufferCap, cRootCap, vRootCap;
    deriveCap_ret_t dc_ret;
    cptr_t faultEP;
    word_t cRootData, vRootData, bufferAddr;

    if (length < 4 || rootCaps.excaprefs[0] == NULL
            || rootCaps.excaprefs[1] == NULL
            || rootCaps.excaprefs[2] == NULL) {
        userError("TCB Configure: Truncated message.");
        current_syscall_error.type = seL4_TruncatedMessage;
        return EXCEPTION_SYSCALL_ERROR;
    }

    faultEP       = getSyscallArg(0, buffer);
    cRootData     = getSyscallArg(1, buffer);
    vRootData     = getSyscallArg(2, buffer);
    bufferAddr    = getSyscallArg(3, buffer);

    cRootSlot  = rootCaps.excaprefs[0];
    cRootCap   = rootCaps.excaprefs[0]->cap;
    vRootSlot  = rootCaps.excaprefs[1];
    vRootCap   = rootCaps.excaprefs[1]->cap;
    bufferSlot = rootCaps.excaprefs[2];
    bufferCap  = rootCaps.excaprefs[2]->cap;

    if (bufferAddr == 0) {
        /* A buffer address of 0 disables the IPC buffer: the frame cap is
         * dropped (NULL slot) and no validation is performed. */
        bufferSlot = NULL;
    } else {
        dc_ret = deriveCap(bufferSlot, bufferCap);
        if (dc_ret.status != EXCEPTION_NONE) {
            return dc_ret.status;
        }
        bufferCap = dc_ret.cap;

        /* The derived cap must be usable as an IPC buffer at this address
         * (checked by checkValidIPCBuffer). */
        exception_t e = checkValidIPCBuffer(bufferAddr, bufferCap);
        if (e != EXCEPTION_NONE) {
            return e;
        }
    }

    /* Refuse to replace roots whose current caps are in the middle of a
     * long-running (preemptible) delete. */
    if (slotCapLongRunningDelete(
                TCB_PTR_CTE_PTR(cap_thread_cap_get_capTCBPtr(cap), tcbCTable)) ||
            slotCapLongRunningDelete(
                TCB_PTR_CTE_PTR(cap_thread_cap_get_capTCBPtr(cap), tcbVTable))) {
        userError("TCB Configure: CSpace or VSpace currently being deleted.");
        current_syscall_error.type = seL4_IllegalOperation;
        return EXCEPTION_SYSCALL_ERROR;
    }

    /* Non-zero data words are folded into the new root caps via
     * updateCapData before derivation. */
    if (cRootData != 0) {
        cRootCap = updateCapData(false, cRootData, cRootCap);
    }

    dc_ret = deriveCap(cRootSlot, cRootCap);
    if (dc_ret.status != EXCEPTION_NONE) {
        return dc_ret.status;
    }
    cRootCap = dc_ret.cap;

    /* The new CSpace root must be a CNode cap. */
    if (cap_get_capType(cRootCap) != cap_cnode_cap) {
        userError("TCB Configure: CSpace cap is invalid.");
        current_syscall_error.type = seL4_IllegalOperation;
        return EXCEPTION_SYSCALL_ERROR;
    }

    if (vRootData != 0) {
        vRootCap = updateCapData(false, vRootData, vRootCap);
    }

    dc_ret = deriveCap(vRootSlot, vRootCap);
    if (dc_ret.status != EXCEPTION_NONE) {
        return dc_ret.status;
    }
    vRootCap = dc_ret.cap;

    /* The new VSpace root must be accepted by isValidVTableRoot. */
    if (!isValidVTableRoot(vRootCap)) {
        userError("TCB Configure: VSpace cap is invalid.");
        current_syscall_error.type = seL4_IllegalOperation;
        return EXCEPTION_SYSCALL_ERROR;
    }

    setThreadState(NODE_STATE(ksCurThread), ThreadState_Restart);
    return invokeTCB_ThreadControl(
               TCB_PTR(cap_thread_cap_get_capTCBPtr(cap)), slot,
               faultEP, NULL_PRIO, NULL_PRIO,
               cRootCap, cRootSlot,
               vRootCap, vRootSlot,
               bufferAddr, bufferCap,
               bufferSlot, thread_control_update_space |
               thread_control_update_ipc_buffer);
}
1021
1022exception_t
1023decodeSetPriority(cap_t cap, word_t length, extra_caps_t excaps, word_t *buffer)
1024{
1025    if (length < 1 || excaps.excaprefs[0] == NULL) {
1026        userError("TCB SetPriority: Truncated message.");
1027        current_syscall_error.type = seL4_TruncatedMessage;
1028        return EXCEPTION_SYSCALL_ERROR;
1029    }
1030
1031    prio_t newPrio = getSyscallArg(0, buffer);
1032    cap_t authCap = excaps.excaprefs[0]->cap;
1033
1034    if (cap_get_capType(authCap) != cap_thread_cap) {
1035        userError("Set priority: authority cap not a TCB.");
1036        current_syscall_error.type = seL4_InvalidCapability;
1037        current_syscall_error.invalidCapNumber = 1;
1038        return EXCEPTION_SYSCALL_ERROR;
1039    }
1040
1041    tcb_t *authTCB = TCB_PTR(cap_thread_cap_get_capTCBPtr(authCap));
1042    exception_t status = checkPrio(newPrio, authTCB);
1043    if (status != EXCEPTION_NONE) {
1044        userError("TCB SetPriority: Requested priority %lu too high (max %lu).",
1045                  (unsigned long) newPrio, (unsigned long) authTCB->tcbMCP);
1046        return status;
1047    }
1048
1049    setThreadState(NODE_STATE(ksCurThread), ThreadState_Restart);
1050    return invokeTCB_ThreadControl(
1051               TCB_PTR(cap_thread_cap_get_capTCBPtr(cap)), NULL,
1052               0, NULL_PRIO, newPrio,
1053               cap_null_cap_new(), NULL,
1054               cap_null_cap_new(), NULL,
1055               0, cap_null_cap_new(),
1056               NULL, thread_control_update_priority);
1057}
1058
1059exception_t
1060decodeSetMCPriority(cap_t cap, word_t length, extra_caps_t excaps, word_t *buffer)
1061{
1062    if (length < 1 || excaps.excaprefs[0] == NULL) {
1063        userError("TCB SetMCPriority: Truncated message.");
1064        current_syscall_error.type = seL4_TruncatedMessage;
1065        return EXCEPTION_SYSCALL_ERROR;
1066    }
1067
1068    prio_t newMcp = getSyscallArg(0, buffer);
1069    cap_t authCap = excaps.excaprefs[0]->cap;
1070
1071    if (cap_get_capType(authCap) != cap_thread_cap) {
1072        userError("TCB SetMCPriority: authority cap not a TCB.");
1073        current_syscall_error.type = seL4_InvalidCapability;
1074        current_syscall_error.invalidCapNumber = 1;
1075        return EXCEPTION_SYSCALL_ERROR;
1076    }
1077
1078    tcb_t *authTCB = TCB_PTR(cap_thread_cap_get_capTCBPtr(authCap));
1079    exception_t status = checkPrio(newMcp, authTCB);
1080    if (status != EXCEPTION_NONE) {
1081        userError("TCB SetMCPriority: Requested maximum controlled priority %lu too high (max %lu).",
1082                  (unsigned long) newMcp, (unsigned long) authTCB->tcbMCP);
1083        return status;
1084    }
1085
1086    setThreadState(NODE_STATE(ksCurThread), ThreadState_Restart);
1087    return invokeTCB_ThreadControl(
1088               TCB_PTR(cap_thread_cap_get_capTCBPtr(cap)), NULL,
1089               0, newMcp, NULL_PRIO,
1090               cap_null_cap_new(), NULL,
1091               cap_null_cap_new(), NULL,
1092               0, cap_null_cap_new(),
1093               NULL, thread_control_update_mcp);
1094}
1095
1096exception_t
1097decodeSetSchedParams(cap_t cap, word_t length, extra_caps_t excaps, word_t *buffer)
1098{
1099    if (length < 2 || excaps.excaprefs[0] == NULL) {
1100        userError("TCB SetSchedParams: Truncated message.");
1101        current_syscall_error.type = seL4_TruncatedMessage;
1102        return EXCEPTION_SYSCALL_ERROR;
1103    }
1104
1105    prio_t newMcp = getSyscallArg(0, buffer);
1106    prio_t newPrio = getSyscallArg(1, buffer);
1107    cap_t authCap = excaps.excaprefs[0]->cap;
1108
1109    if (cap_get_capType(authCap) != cap_thread_cap) {
1110        userError("TCB SetSchedParams: authority cap not a TCB.");
1111        current_syscall_error.type = seL4_InvalidCapability;
1112        current_syscall_error.invalidCapNumber = 1;
1113        return EXCEPTION_SYSCALL_ERROR;
1114    }
1115
1116    tcb_t *authTCB = TCB_PTR(cap_thread_cap_get_capTCBPtr(authCap));
1117    exception_t status = checkPrio(newMcp, authTCB);
1118    if (status != EXCEPTION_NONE) {
1119        userError("TCB SetSchedParams: Requested maximum controlled priority %lu too high (max %lu).",
1120                  (unsigned long) newMcp, (unsigned long) authTCB->tcbMCP);
1121        return status;
1122    }
1123
1124    status = checkPrio(newPrio, authTCB);
1125    if (status != EXCEPTION_NONE) {
1126        userError("TCB SetSchedParams: Requested priority %lu too high (max %lu).",
1127                  (unsigned long) newMcp, (unsigned long) authTCB->tcbMCP);
1128        return status;
1129    }
1130
1131    setThreadState(NODE_STATE(ksCurThread), ThreadState_Restart);
1132    return invokeTCB_ThreadControl(
1133               TCB_PTR(cap_thread_cap_get_capTCBPtr(cap)), NULL,
1134               0, newMcp, newPrio,
1135               cap_null_cap_new(), NULL,
1136               cap_null_cap_new(), NULL,
1137               0, cap_null_cap_new(),
1138               NULL, thread_control_update_mcp |
1139               thread_control_update_priority);
1140}
1141
1142
1143exception_t
1144decodeSetIPCBuffer(cap_t cap, word_t length, cte_t* slot,
1145                   extra_caps_t excaps, word_t *buffer)
1146{
1147    cptr_t cptr_bufferPtr;
1148    cap_t bufferCap;
1149    cte_t *bufferSlot;
1150
1151    if (length < 1 || excaps.excaprefs[0] == NULL) {
1152        userError("TCB SetIPCBuffer: Truncated message.");
1153        current_syscall_error.type = seL4_TruncatedMessage;
1154        return EXCEPTION_SYSCALL_ERROR;
1155    }
1156
1157    cptr_bufferPtr  = getSyscallArg(0, buffer);
1158    bufferSlot = excaps.excaprefs[0];
1159    bufferCap  = excaps.excaprefs[0]->cap;
1160
1161    if (cptr_bufferPtr == 0) {
1162        bufferSlot = NULL;
1163    } else {
1164        exception_t e;
1165        deriveCap_ret_t dc_ret;
1166
1167        dc_ret = deriveCap(bufferSlot, bufferCap);
1168        if (dc_ret.status != EXCEPTION_NONE) {
1169            return dc_ret.status;
1170        }
1171        bufferCap = dc_ret.cap;
1172        e = checkValidIPCBuffer(cptr_bufferPtr, bufferCap);
1173        if (e != EXCEPTION_NONE) {
1174            return e;
1175        }
1176    }
1177
1178    setThreadState(NODE_STATE(ksCurThread), ThreadState_Restart);
1179    return invokeTCB_ThreadControl(
1180               TCB_PTR(cap_thread_cap_get_capTCBPtr(cap)), slot,
1181               0, NULL_PRIO, NULL_PRIO,
1182               cap_null_cap_new(), NULL,
1183               cap_null_cap_new(), NULL,
1184               cptr_bufferPtr, bufferCap,
1185               bufferSlot, thread_control_update_ipc_buffer);
1186}
1187
/* Decode a TCB SetSpace invocation: replace the fault endpoint, CSpace
 * root and VSpace root of the target thread (the space-only subset of
 * TCB Configure; no IPC buffer update).
 */
exception_t
decodeSetSpace(cap_t cap, word_t length, cte_t* slot,
               extra_caps_t excaps, word_t *buffer)
{
    cptr_t faultEP;
    word_t cRootData, vRootData;
    cte_t *cRootSlot, *vRootSlot;
    cap_t cRootCap, vRootCap;
    deriveCap_ret_t dc_ret;

    if (length < 3 || excaps.excaprefs[0] == NULL
            || excaps.excaprefs[1] == NULL) {
        userError("TCB SetSpace: Truncated message.");
        current_syscall_error.type = seL4_TruncatedMessage;
        return EXCEPTION_SYSCALL_ERROR;
    }

    faultEP   = getSyscallArg(0, buffer);
    cRootData = getSyscallArg(1, buffer);
    vRootData = getSyscallArg(2, buffer);

    cRootSlot  = excaps.excaprefs[0];
    cRootCap   = excaps.excaprefs[0]->cap;
    vRootSlot  = excaps.excaprefs[1];
    vRootCap   = excaps.excaprefs[1]->cap;

    /* Refuse to replace roots whose current caps are in the middle of a
     * long-running (preemptible) delete. */
    if (slotCapLongRunningDelete(
                TCB_PTR_CTE_PTR(cap_thread_cap_get_capTCBPtr(cap), tcbCTable)) ||
            slotCapLongRunningDelete(
                TCB_PTR_CTE_PTR(cap_thread_cap_get_capTCBPtr(cap), tcbVTable))) {
        userError("TCB SetSpace: CSpace or VSpace currently being deleted.");
        current_syscall_error.type = seL4_IllegalOperation;
        return EXCEPTION_SYSCALL_ERROR;
    }

    /* Non-zero data words are folded into the new root caps via
     * updateCapData before derivation. */
    if (cRootData != 0) {
        cRootCap = updateCapData(false, cRootData, cRootCap);
    }

    dc_ret = deriveCap(cRootSlot, cRootCap);
    if (dc_ret.status != EXCEPTION_NONE) {
        return dc_ret.status;
    }
    cRootCap = dc_ret.cap;

    /* The new CSpace root must be a CNode cap. */
    if (cap_get_capType(cRootCap) != cap_cnode_cap) {
        userError("TCB SetSpace: Invalid CNode cap.");
        current_syscall_error.type = seL4_IllegalOperation;
        return EXCEPTION_SYSCALL_ERROR;
    }

    if (vRootData != 0) {
        vRootCap = updateCapData(false, vRootData, vRootCap);
    }

    dc_ret = deriveCap(vRootSlot, vRootCap);
    if (dc_ret.status != EXCEPTION_NONE) {
        return dc_ret.status;
    }
    vRootCap = dc_ret.cap;

    /* The new VSpace root must be accepted by isValidVTableRoot. */
    if (!isValidVTableRoot(vRootCap)) {
        userError("TCB SetSpace: Invalid VSpace cap.");
        current_syscall_error.type = seL4_IllegalOperation;
        return EXCEPTION_SYSCALL_ERROR;
    }

    setThreadState(NODE_STATE(ksCurThread), ThreadState_Restart);
    return invokeTCB_ThreadControl(
               TCB_PTR(cap_thread_cap_get_capTCBPtr(cap)), slot,
               faultEP,
               NULL_PRIO, NULL_PRIO,
               cRootCap, cRootSlot,
               vRootCap, vRootSlot,
               0, cap_null_cap_new(), NULL, thread_control_update_space);
}
1264
1265exception_t
1266decodeDomainInvocation(word_t invLabel, word_t length, extra_caps_t excaps, word_t *buffer)
1267{
1268    word_t domain;
1269    cap_t tcap;
1270
1271    if (unlikely(invLabel != DomainSetSet)) {
1272        current_syscall_error.type = seL4_IllegalOperation;
1273        return EXCEPTION_SYSCALL_ERROR;
1274    }
1275
1276    if (unlikely(length == 0)) {
1277        userError("Domain Configure: Truncated message.");
1278        current_syscall_error.type = seL4_TruncatedMessage;
1279        return EXCEPTION_SYSCALL_ERROR;
1280    } else {
1281        domain = getSyscallArg(0, buffer);
1282        if (domain >= CONFIG_NUM_DOMAINS) {
1283            userError("Domain Configure: invalid domain (%lu >= %u).",
1284                      domain, CONFIG_NUM_DOMAINS);
1285            current_syscall_error.type = seL4_InvalidArgument;
1286            current_syscall_error.invalidArgumentNumber = 0;
1287            return EXCEPTION_SYSCALL_ERROR;
1288        }
1289    }
1290
1291    if (unlikely(excaps.excaprefs[0] == NULL)) {
1292        userError("Domain Configure: Truncated message.");
1293        current_syscall_error.type = seL4_TruncatedMessage;
1294        return EXCEPTION_SYSCALL_ERROR;
1295    }
1296
1297    tcap = excaps.excaprefs[0]->cap;
1298    if (unlikely(cap_get_capType(tcap) != cap_thread_cap)) {
1299        userError("Domain Configure: thread cap required.");
1300        current_syscall_error.type = seL4_InvalidArgument;
1301        current_syscall_error.invalidArgumentNumber = 1;
1302        return EXCEPTION_SYSCALL_ERROR;
1303    }
1304
1305    setThreadState(NODE_STATE(ksCurThread), ThreadState_Restart);
1306    setDomain(TCB_PTR(cap_thread_cap_get_capTCBPtr(tcap)), domain);
1307    return EXCEPTION_NONE;
1308}
1309
1310exception_t
1311decodeBindNotification(cap_t cap, extra_caps_t excaps)
1312{
1313    notification_t *ntfnPtr;
1314    tcb_t *tcb;
1315    cap_t ntfn_cap;
1316
1317    if (excaps.excaprefs[0] == NULL) {
1318        userError("TCB BindNotification: Truncated message.");
1319        current_syscall_error.type = seL4_TruncatedMessage;
1320        return EXCEPTION_SYSCALL_ERROR;
1321    }
1322
1323    tcb = TCB_PTR(cap_thread_cap_get_capTCBPtr(cap));
1324
1325    if (tcb->tcbBoundNotification) {
1326        userError("TCB BindNotification: TCB already has a bound notification.");
1327        current_syscall_error.type = seL4_IllegalOperation;
1328        return EXCEPTION_SYSCALL_ERROR;
1329    }
1330
1331    ntfn_cap = excaps.excaprefs[0]->cap;
1332
1333    if (cap_get_capType(ntfn_cap) == cap_notification_cap) {
1334        ntfnPtr = NTFN_PTR(cap_notification_cap_get_capNtfnPtr(ntfn_cap));
1335    } else {
1336        userError("TCB BindNotification: Notification is invalid.");
1337        current_syscall_error.type = seL4_IllegalOperation;
1338        return EXCEPTION_SYSCALL_ERROR;
1339    }
1340
1341    if (!cap_notification_cap_get_capNtfnCanReceive(ntfn_cap)) {
1342        userError("TCB BindNotification: Insufficient access rights");
1343        current_syscall_error.type = seL4_IllegalOperation;
1344        return EXCEPTION_SYSCALL_ERROR;
1345    }
1346
1347    if ((tcb_t*)notification_ptr_get_ntfnQueue_head(ntfnPtr)
1348            || (tcb_t*)notification_ptr_get_ntfnBoundTCB(ntfnPtr)) {
1349        userError("TCB BindNotification: Notification cannot be bound.");
1350        current_syscall_error.type = seL4_IllegalOperation;
1351        return EXCEPTION_SYSCALL_ERROR;
1352    }
1353
1354
1355    setThreadState(NODE_STATE(ksCurThread), ThreadState_Restart);
1356    return invokeTCB_NotificationControl(tcb, ntfnPtr);
1357}
1358
1359exception_t
1360decodeUnbindNotification(cap_t cap)
1361{
1362    tcb_t *tcb;
1363
1364    tcb = TCB_PTR(cap_thread_cap_get_capTCBPtr(cap));
1365
1366    if (!tcb->tcbBoundNotification) {
1367        userError("TCB UnbindNotification: TCB already has no bound Notification.");
1368        current_syscall_error.type = seL4_IllegalOperation;
1369        return EXCEPTION_SYSCALL_ERROR;
1370    }
1371
1372    setThreadState(NODE_STATE(ksCurThread), ThreadState_Restart);
1373    return invokeTCB_NotificationControl(tcb, NULL);
1374}
1375
1376/* The following functions sit in the preemption monad and implement the
1377 * preemptible, non-faulting bottom end of a TCB invocation. */
/* Suspend the target thread.  Cannot fail. */
exception_t
invokeTCB_Suspend(tcb_t *thread)
{
    suspend(thread);
    return EXCEPTION_NONE;
}
1384
/* Resume (restart) the target thread.  Cannot fail. */
exception_t
invokeTCB_Resume(tcb_t *thread)
{
    restart(thread);
    return EXCEPTION_NONE;
}
1391
/* Apply the updates selected by updateFlags to the target TCB.
 *
 * target        - TCB being modified.
 * slot          - slot holding the invoked thread cap; NULL when no
 *                 space/buffer update is requested (see priority callers).
 * faultep       - new fault endpoint cptr (with ..._update_space).
 * mcp, priority - new maximum controlled priority / priority.
 * cRoot/vRoot   - new CSpace/VSpace root caps plus their source slots.
 * bufferAddr/bufferCap/bufferSrcSlot - new IPC buffer.
 *
 * The cteDelete calls below are preemptible, so before re-inserting any
 * cap we re-check that both the source cap and the invoked thread cap
 * are still the same objects. */
exception_t
invokeTCB_ThreadControl(tcb_t *target, cte_t* slot,
                        cptr_t faultep, prio_t mcp, prio_t priority,
                        cap_t cRoot_newCap, cte_t *cRoot_srcSlot,
                        cap_t vRoot_newCap, cte_t *vRoot_srcSlot,
                        word_t bufferAddr, cap_t bufferCap,
                        cte_t *bufferSrcSlot,
                        thread_control_flag_t updateFlags)
{
    exception_t e;
    /* Fresh thread cap for target, used below to confirm that `slot`
     * still holds a cap to this TCB after a preemptible delete. */
    cap_t tCap = cap_thread_cap_new((word_t)target);

    if (updateFlags & thread_control_update_space) {
        target->tcbFaultHandler = faultep;
    }

    if (updateFlags & thread_control_update_mcp) {
        setMCPriority(target, mcp);
    }

    if (updateFlags & thread_control_update_priority) {
        setPriority(target, priority);
    }

    if (updateFlags & thread_control_update_space) {
        cte_t *rootSlot;

        /* Replace the CSpace root: delete the old cap, then install the
         * new one only if both caps survived the delete unchanged. */
        rootSlot = TCB_PTR_CTE_PTR(target, tcbCTable);
        e = cteDelete(rootSlot, true);
        if (e != EXCEPTION_NONE) {
            return e;
        }
        if (sameObjectAs(cRoot_newCap, cRoot_srcSlot->cap) &&
                sameObjectAs(tCap, slot->cap)) {
            cteInsert(cRoot_newCap, cRoot_srcSlot, rootSlot);
        }
    }

    if (updateFlags & thread_control_update_space) {
        cte_t *rootSlot;

        /* Replace the VSpace root, with the same guarded re-insert. */
        rootSlot = TCB_PTR_CTE_PTR(target, tcbVTable);
        e = cteDelete(rootSlot, true);
        if (e != EXCEPTION_NONE) {
            return e;
        }
        if (sameObjectAs(vRoot_newCap, vRoot_srcSlot->cap) &&
                sameObjectAs(tCap, slot->cap)) {
            cteInsert(vRoot_newCap, vRoot_srcSlot, rootSlot);
        }
    }

    if (updateFlags & thread_control_update_ipc_buffer) {
        cte_t *bufferSlot;

        bufferSlot = TCB_PTR_CTE_PTR(target, tcbBuffer);
        e = cteDelete(bufferSlot, true);
        if (e != EXCEPTION_NONE) {
            return e;
        }
        target->tcbIPCBuffer = bufferAddr;

        Arch_setTCBIPCBuffer(target, bufferAddr);

        /* bufferSrcSlot is NULL when the buffer is being cleared: only the
         * address is updated and no frame cap is installed. */
        if (bufferSrcSlot && sameObjectAs(bufferCap, bufferSrcSlot->cap) &&
                sameObjectAs(tCap, slot->cap)) {
            cteInsert(bufferCap, bufferSrcSlot, bufferSlot);
        }

        if (target == NODE_STATE(ksCurThread)) {
            /* Changed the running thread's buffer; force a reschedule so
             * the change takes effect before returning to userlevel. */
            rescheduleRequired();
        }
    }

    return EXCEPTION_NONE;
}
1468
1469exception_t
1470invokeTCB_CopyRegisters(tcb_t *dest, tcb_t *tcb_src,
1471                        bool_t suspendSource, bool_t resumeTarget,
1472                        bool_t transferFrame, bool_t transferInteger,
1473                        word_t transferArch)
1474{
1475    if (suspendSource) {
1476        suspend(tcb_src);
1477    }
1478
1479    if (resumeTarget) {
1480        restart(dest);
1481    }
1482
1483    if (transferFrame) {
1484        word_t i;
1485        word_t v;
1486        word_t pc;
1487
1488        for (i = 0; i < n_frameRegisters; i++) {
1489            v = getRegister(tcb_src, frameRegisters[i]);
1490            setRegister(dest, frameRegisters[i], v);
1491        }
1492
1493        pc = getRestartPC(dest);
1494        setNextPC(dest, pc);
1495    }
1496
1497    if (transferInteger) {
1498        word_t i;
1499        word_t v;
1500
1501        for (i = 0; i < n_gpRegisters; i++) {
1502            v = getRegister(tcb_src, gpRegisters[i]);
1503            setRegister(dest, gpRegisters[i], v);
1504        }
1505    }
1506
1507    Arch_postModifyRegisters(dest);
1508
1509    if (dest == NODE_STATE(ksCurThread)) {
1510        /* If we modified the current thread we may need to reschedule
1511         * due to changing registers are only reloaded in Arch_switchToThread */
1512        rescheduleRequired();
1513    }
1514
1515    return Arch_performTransfer(transferArch, tcb_src, dest);
1516}
1517
1518/* ReadRegisters is a special case: replyFromKernel & setMRs are
1519 * unfolded here, in order to avoid passing the large reply message up
1520 * to the top level in a global (and double-copying). We prevent the
1521 * top-level replyFromKernel_success_empty() from running by setting the
1522 * thread state. Retype does this too.
1523 */
/* Read up to n registers of tcb_src into the caller's reply: frame
 * registers first, then GP registers, each stage going first into message
 * registers and overflowing into the IPC buffer.  The reply is only built
 * when invoked with Call. */
exception_t
invokeTCB_ReadRegisters(tcb_t *tcb_src, bool_t suspendSource,
                        word_t n, word_t arch, bool_t call)
{
    word_t i, j;
    exception_t e;
    tcb_t *thread;

    thread = NODE_STATE(ksCurThread);

    if (suspendSource) {
        suspend(tcb_src);
    }

    e = Arch_performTransfer(arch, tcb_src, NODE_STATE(ksCurThread));
    if (e != EXCEPTION_NONE) {
        return e;
    }

    if (call) {
        word_t *ipcBuffer;

        ipcBuffer = lookupIPCBuffer(true, thread);

        setRegister(thread, badgeRegister, 0);

        /* Frame registers: as many as fit in the message registers... */
        for (i = 0; i < n && i < n_frameRegisters && i < n_msgRegisters; i++) {
            setRegister(thread, msgRegisters[i],
                        getRegister(tcb_src, frameRegisters[i]));
        }

        /* ...then spill the remainder into the IPC buffer (note the +1
         * offset: buffer word 0 is not used for message data). */
        if (ipcBuffer != NULL && i < n && i < n_frameRegisters) {
            for (; i < n && i < n_frameRegisters; i++) {
                ipcBuffer[i + 1] = getRegister(tcb_src, frameRegisters[i]);
            }
        }

        /* j = number of frame registers actually transferred. */
        j = i;

        /* GP registers: same two-stage copy, positioned after the frame
         * registers in the message. */
        for (i = 0; i < n_gpRegisters && i + n_frameRegisters < n
                && i + n_frameRegisters < n_msgRegisters; i++) {
            setRegister(thread, msgRegisters[i + n_frameRegisters],
                        getRegister(tcb_src, gpRegisters[i]));
        }

        if (ipcBuffer != NULL && i < n_gpRegisters
                && i + n_frameRegisters < n) {
            for (; i < n_gpRegisters && i + n_frameRegisters < n; i++) {
                ipcBuffer[i + n_frameRegisters + 1] =
                    getRegister(tcb_src, gpRegisters[i]);
            }
        }

        /* Message length = frame registers (j) + GP registers (i) copied. */
        setRegister(thread, msgInfoRegister, wordFromMessageInfo(
                        seL4_MessageInfo_new(0, 0, 0, i + j)));
    }
    /* Skip the generic replyFromKernel path (see comment above). */
    setThreadState(thread, ThreadState_Running);

    return EXCEPTION_NONE;
}
1584
/* Write up to n register values from the caller's message into dest's
 * frame and GP registers, sanitising each value, then optionally resume
 * dest.  The first two syscall arguments (flags and count) are skipped. */
exception_t
invokeTCB_WriteRegisters(tcb_t *dest, bool_t resumeTarget,
                         word_t n, word_t arch, word_t *buffer)
{
    word_t i;
    word_t pc;
    exception_t e;
    bool_t archInfo;

    e = Arch_performTransfer(arch, NODE_STATE(ksCurThread), dest);
    if (e != EXCEPTION_NONE) {
        return e;
    }

    /* Clamp the count to the total number of writable registers. */
    if (n > n_frameRegisters + n_gpRegisters) {
        n = n_frameRegisters + n_gpRegisters;
    }

    archInfo = Arch_getSanitiseRegisterInfo(dest);

    for (i = 0; i < n_frameRegisters && i < n; i++) {
        /* Offset of 2 to get past the initial syscall arguments */
        setRegister(dest, frameRegisters[i],
                    sanitiseRegister(frameRegisters[i],
                                     getSyscallArg(i + 2, buffer), archInfo));
    }

    /* GP registers follow the frame registers in the message. */
    for (i = 0; i < n_gpRegisters && i + n_frameRegisters < n; i++) {
        setRegister(dest, gpRegisters[i],
                    sanitiseRegister(gpRegisters[i],
                                     getSyscallArg(i + n_frameRegisters + 2,
                                                   buffer), archInfo));
    }

    /* Re-sync the next PC with the (possibly rewritten) restart PC. */
    pc = getRestartPC(dest);
    setNextPC(dest, pc);

    Arch_postModifyRegisters(dest);

    if (resumeTarget) {
        restart(dest);
    }

    if (dest == NODE_STATE(ksCurThread)) {
        /* If we modified the current thread we may need to reschedule
         * due to changing registers are only reloaded in Arch_switchToThread */
        rescheduleRequired();
    }

    return EXCEPTION_NONE;
}
1636
1637exception_t
1638invokeTCB_NotificationControl(tcb_t *tcb, notification_t *ntfnPtr)
1639{
1640    if (ntfnPtr) {
1641        bindNotification(tcb, ntfnPtr);
1642    } else {
1643        unbindNotification(tcb);
1644    }
1645
1646    return EXCEPTION_NONE;
1647}
1648
#ifdef CONFIG_DEBUG_BUILD
/* Debug builds only: record a human-readable name in the TCB.  strlcpy
 * truncates to TCB_NAME_LENGTH and always NUL-terminates. */
void
setThreadName(tcb_t *tcb, const char *name)
{
    strlcpy(tcb->tcbName, name, TCB_NAME_LENGTH);
}
#endif
1656
1657word_t
1658setMRs_syscall_error(tcb_t *thread, word_t *receiveIPCBuffer)
1659{
1660    switch (current_syscall_error.type) {
1661    case seL4_InvalidArgument:
1662        return setMR(thread, receiveIPCBuffer, 0,
1663                     current_syscall_error.invalidArgumentNumber);
1664
1665    case seL4_InvalidCapability:
1666        return setMR(thread, receiveIPCBuffer, 0,
1667                     current_syscall_error.invalidCapNumber);
1668
1669    case seL4_IllegalOperation:
1670        return 0;
1671
1672    case seL4_RangeError:
1673        setMR(thread, receiveIPCBuffer, 0,
1674              current_syscall_error.rangeErrorMin);
1675        return setMR(thread, receiveIPCBuffer, 1,
1676                     current_syscall_error.rangeErrorMax);
1677
1678    case seL4_AlignmentError:
1679        return 0;
1680
1681    case seL4_FailedLookup:
1682        setMR(thread, receiveIPCBuffer, 0,
1683              current_syscall_error.failedLookupWasSource ? 1 : 0);
1684        return setMRs_lookup_failure(thread, receiveIPCBuffer,
1685                                     current_lookup_fault, 1);
1686
1687    case seL4_TruncatedMessage:
1688    case seL4_DeleteFirst:
1689    case seL4_RevokeFirst:
1690        return 0;
1691    case seL4_NotEnoughMemory:
1692        return setMR(thread, receiveIPCBuffer, 0,
1693                     current_syscall_error.memoryLeft);
1694    default:
1695        fail("Invalid syscall error");
1696    }
1697}
1698