1/*
2 * Copyright 2014, General Dynamics C4 Systems
3 *
4 * SPDX-License-Identifier: GPL-2.0-only
5 */
6
7#include <config.h>
8#include <types.h>
9#include <api/failures.h>
10#include <api/invocation.h>
11#include <api/syscall.h>
12#include <sel4/shared_types.h>
13#include <machine/io.h>
14#include <object/structures.h>
15#include <object/objecttype.h>
16#include <object/cnode.h>
17#ifdef CONFIG_KERNEL_MCS
18#include <object/schedcontext.h>
19#endif
20#include <object/tcb.h>
21#include <kernel/cspace.h>
22#include <kernel/thread.h>
23#include <kernel/vspace.h>
24#include <model/statedata.h>
25#include <util.h>
26#include <string.h>
27#include <stdint.h>
28#include <arch/smp/ipi_inline.h>
29
30#define NULL_PRIO 0
31
32static exception_t checkPrio(prio_t prio, tcb_t *auth)
33{
34    prio_t mcp;
35
36    mcp = auth->tcbMCP;
37
38    /* system invariant: existing MCPs are bounded */
39    assert(mcp <= seL4_MaxPrio);
40
41    /* can't assign a priority greater than our own mcp */
42    if (prio > mcp) {
43        current_syscall_error.type = seL4_RangeError;
44        current_syscall_error.rangeErrorMin = seL4_MinPrio;
45        current_syscall_error.rangeErrorMax = mcp;
46        return EXCEPTION_SYSCALL_ERROR;
47    }
48
49    return EXCEPTION_NONE;
50}
51
/* Mark priority `prio` of domain `dom` as non-empty in core `cpu`'s two-level
 * ready-queue bitmap (L1 word selects a group of priorities, L2 word selects
 * the priority within the group). */
static inline void addToBitmap(word_t cpu, word_t dom, word_t prio)
{
    word_t l1index;
    word_t l1index_inverted;

    l1index = prio_to_l1index(prio);
    l1index_inverted = invert_l1index(l1index);

    NODE_STATE_ON_CORE(ksReadyQueuesL1Bitmap[dom], cpu) |= BIT(l1index);
    /* we invert the l1 index when accessing the 2nd level of the bitmap in
       order to increase the likelihood that the high prio threads' l2 index
       word will be on the same cache line as the l1 index word - this makes
       sure the fastpath is fastest for high prio threads */
    NODE_STATE_ON_CORE(ksReadyQueuesL2Bitmap[dom][l1index_inverted], cpu) |= BIT(prio & MASK(wordRadix));
}
67
/* Clear priority `prio` of domain `dom` from core `cpu`'s two-level
 * ready-queue bitmap; the L1 bit is cleared only once the whole L2 word
 * for that priority group becomes empty. */
static inline void removeFromBitmap(word_t cpu, word_t dom, word_t prio)
{
    word_t l1index;
    word_t l1index_inverted;

    l1index = prio_to_l1index(prio);
    l1index_inverted = invert_l1index(l1index);
    NODE_STATE_ON_CORE(ksReadyQueuesL2Bitmap[dom][l1index_inverted], cpu) &= ~BIT(prio & MASK(wordRadix));
    /* no priorities left in this L1 group: clear the L1 summary bit too */
    if (unlikely(!NODE_STATE_ON_CORE(ksReadyQueuesL2Bitmap[dom][l1index_inverted], cpu))) {
        NODE_STATE_ON_CORE(ksReadyQueuesL1Bitmap[dom], cpu) &= ~BIT(l1index);
    }
}
80
81/* Add TCB to the head of a scheduler queue */
/* Add TCB to the head of a scheduler queue */
void tcbSchedEnqueue(tcb_t *tcb)
{
#ifdef CONFIG_KERNEL_MCS
    /* MCS: only schedulable threads with sufficient budget may be queued */
    assert(isSchedulable(tcb));
    assert(refill_sufficient(tcb->tcbSchedContext, 0));
#endif

    /* no-op if the thread is already in a ready queue */
    if (!thread_state_get_tcbQueued(tcb->tcbState)) {
        tcb_queue_t queue;
        dom_t dom;
        prio_t prio;
        word_t idx;

        dom = tcb->tcbDomain;
        prio = tcb->tcbPriority;
        idx = ready_queues_index(dom, prio);
        queue = NODE_STATE_ON_CORE(ksReadyQueues[idx], tcb->tcbAffinity);

        if (!queue.end) { /* Empty list */
            queue.end = tcb;
            /* first thread at this (dom, prio): set the bitmap bit */
            addToBitmap(SMP_TERNARY(tcb->tcbAffinity, 0), dom, prio);
        } else {
            queue.head->tcbSchedPrev = tcb;
        }
        /* link tcb in as the new head of the doubly-linked queue */
        tcb->tcbSchedPrev = NULL;
        tcb->tcbSchedNext = queue.head;
        queue.head = tcb;

        NODE_STATE_ON_CORE(ksReadyQueues[idx], tcb->tcbAffinity) = queue;

        thread_state_ptr_set_tcbQueued(&tcb->tcbState, true);
    }
}
115
116/* Add TCB to the end of a scheduler queue */
/* Add TCB to the end of a scheduler queue */
void tcbSchedAppend(tcb_t *tcb)
{
#ifdef CONFIG_KERNEL_MCS
    /* MCS: appending additionally requires a ready refill, since appended
     * threads are expected to be immediately runnable */
    assert(isSchedulable(tcb));
    assert(refill_sufficient(tcb->tcbSchedContext, 0));
    assert(refill_ready(tcb->tcbSchedContext));
#endif
    /* no-op if the thread is already in a ready queue */
    if (!thread_state_get_tcbQueued(tcb->tcbState)) {
        tcb_queue_t queue;
        dom_t dom;
        prio_t prio;
        word_t idx;

        dom = tcb->tcbDomain;
        prio = tcb->tcbPriority;
        idx = ready_queues_index(dom, prio);
        queue = NODE_STATE_ON_CORE(ksReadyQueues[idx], tcb->tcbAffinity);

        if (!queue.head) { /* Empty list */
            queue.head = tcb;
            /* first thread at this (dom, prio): set the bitmap bit */
            addToBitmap(SMP_TERNARY(tcb->tcbAffinity, 0), dom, prio);
        } else {
            queue.end->tcbSchedNext = tcb;
        }
        /* link tcb in as the new tail of the doubly-linked queue */
        tcb->tcbSchedPrev = queue.end;
        tcb->tcbSchedNext = NULL;
        queue.end = tcb;

        NODE_STATE_ON_CORE(ksReadyQueues[idx], tcb->tcbAffinity) = queue;

        thread_state_ptr_set_tcbQueued(&tcb->tcbState, true);
    }
}
150
151/* Remove TCB from a scheduler queue */
/* Remove TCB from a scheduler queue */
void tcbSchedDequeue(tcb_t *tcb)
{
    /* no-op if the thread is not currently in a ready queue */
    if (thread_state_get_tcbQueued(tcb->tcbState)) {
        tcb_queue_t queue;
        dom_t dom;
        prio_t prio;
        word_t idx;

        dom = tcb->tcbDomain;
        prio = tcb->tcbPriority;
        idx = ready_queues_index(dom, prio);
        queue = NODE_STATE_ON_CORE(ksReadyQueues[idx], tcb->tcbAffinity);

        if (tcb->tcbSchedPrev) {
            tcb->tcbSchedPrev->tcbSchedNext = tcb->tcbSchedNext;
        } else {
            /* tcb was the head */
            queue.head = tcb->tcbSchedNext;
            if (likely(!tcb->tcbSchedNext)) {
                /* queue is now empty: clear its bit in the ready bitmap */
                removeFromBitmap(SMP_TERNARY(tcb->tcbAffinity, 0), dom, prio);
            }
        }

        if (tcb->tcbSchedNext) {
            tcb->tcbSchedNext->tcbSchedPrev = tcb->tcbSchedPrev;
        } else {
            /* tcb was the tail */
            queue.end = tcb->tcbSchedPrev;
        }

        NODE_STATE_ON_CORE(ksReadyQueues[idx], tcb->tcbAffinity) = queue;

        thread_state_ptr_set_tcbQueued(&tcb->tcbState, false);
    }
}
185
186#ifdef CONFIG_DEBUG_BUILD
187void tcbDebugAppend(tcb_t *tcb)
188{
189    debug_tcb_t *debug_tcb = TCB_PTR_DEBUG_PTR(tcb);
190    /* prepend to the list */
191    debug_tcb->tcbDebugPrev = NULL;
192
193    debug_tcb->tcbDebugNext = NODE_STATE_ON_CORE(ksDebugTCBs, tcb->tcbAffinity);
194
195    if (NODE_STATE_ON_CORE(ksDebugTCBs, tcb->tcbAffinity)) {
196        TCB_PTR_DEBUG_PTR(NODE_STATE_ON_CORE(ksDebugTCBs, tcb->tcbAffinity))->tcbDebugPrev = tcb;
197    }
198
199    NODE_STATE_ON_CORE(ksDebugTCBs, tcb->tcbAffinity) = tcb;
200}
201
202void tcbDebugRemove(tcb_t *tcb)
203{
204    debug_tcb_t *debug_tcb = TCB_PTR_DEBUG_PTR(tcb);
205
206    assert(NODE_STATE_ON_CORE(ksDebugTCBs, tcb->tcbAffinity) != NULL);
207    if (tcb == NODE_STATE_ON_CORE(ksDebugTCBs, tcb->tcbAffinity)) {
208        NODE_STATE_ON_CORE(ksDebugTCBs, tcb->tcbAffinity) = TCB_PTR_DEBUG_PTR(NODE_STATE_ON_CORE(ksDebugTCBs,
209                                                                                                 tcb->tcbAffinity))->tcbDebugNext;
210    } else {
211        assert(TCB_PTR_DEBUG_PTR(tcb)->tcbDebugPrev);
212        TCB_PTR_DEBUG_PTR(debug_tcb->tcbDebugPrev)->tcbDebugNext = debug_tcb->tcbDebugNext;
213    }
214
215    if (debug_tcb->tcbDebugNext) {
216        TCB_PTR_DEBUG_PTR(debug_tcb->tcbDebugNext)->tcbDebugPrev = debug_tcb->tcbDebugPrev;
217    }
218
219    debug_tcb->tcbDebugPrev = NULL;
220    debug_tcb->tcbDebugNext = NULL;
221}
222#endif /* CONFIG_DEBUG_BUILD */
223
224#ifndef CONFIG_KERNEL_MCS
225/* Add TCB to the end of an endpoint queue */
226tcb_queue_t tcbEPAppend(tcb_t *tcb, tcb_queue_t queue)
227{
228    if (!queue.head) { /* Empty list */
229        queue.head = tcb;
230    } else {
231        queue.end->tcbEPNext = tcb;
232    }
233    tcb->tcbEPPrev = queue.end;
234    tcb->tcbEPNext = NULL;
235    queue.end = tcb;
236
237    return queue;
238}
239#endif
240
241/* Remove TCB from an endpoint queue */
242tcb_queue_t tcbEPDequeue(tcb_t *tcb, tcb_queue_t queue)
243{
244    if (tcb->tcbEPPrev) {
245        tcb->tcbEPPrev->tcbEPNext = tcb->tcbEPNext;
246    } else {
247        queue.head = tcb->tcbEPNext;
248    }
249
250    if (tcb->tcbEPNext) {
251        tcb->tcbEPNext->tcbEPPrev = tcb->tcbEPPrev;
252    } else {
253        queue.end = tcb->tcbEPPrev;
254    }
255
256    return queue;
257}
258
259#ifdef CONFIG_KERNEL_MCS
/* Remove `tcb` from the per-core release queue (the queue of threads waiting
 * for their next budget refill). The sched-prev/next links are reused for
 * this queue, which is legal because a thread is never in the ready queue and
 * the release queue at the same time. */
void tcbReleaseRemove(tcb_t *tcb)
{
    if (likely(thread_state_get_tcbInReleaseQueue(tcb->tcbState))) {
        if (tcb->tcbSchedPrev) {
            tcb->tcbSchedPrev->tcbSchedNext = tcb->tcbSchedNext;
        } else {
            NODE_STATE_ON_CORE(ksReleaseHead, tcb->tcbAffinity) = tcb->tcbSchedNext;
            /* the head has changed, we might need to set a new timeout */
            NODE_STATE_ON_CORE(ksReprogram, tcb->tcbAffinity) = true;
        }

        if (tcb->tcbSchedNext) {
            tcb->tcbSchedNext->tcbSchedPrev = tcb->tcbSchedPrev;
        }

        /* fully detach and mark as no longer queued */
        tcb->tcbSchedNext = NULL;
        tcb->tcbSchedPrev = NULL;
        thread_state_ptr_set_tcbInReleaseQueue(&tcb->tcbState, false);
    }
}
280
/* Insert `tcb` into the per-core release queue, which is kept sorted by the
 * time of the head refill of each thread's scheduling context (earliest
 * first). Threads with equal times are ordered FIFO, since we insert after
 * all entries whose time is <= ours. */
void tcbReleaseEnqueue(tcb_t *tcb)
{
    assert(thread_state_get_tcbInReleaseQueue(tcb->tcbState) == false);
    assert(thread_state_get_tcbQueued(tcb->tcbState) == false);

    tcb_t *before = NULL;
    tcb_t *after = NODE_STATE_ON_CORE(ksReleaseHead, tcb->tcbAffinity);

    /* find our place in the ordered queue */
    while (after != NULL &&
           refill_head(tcb->tcbSchedContext)->rTime >= refill_head(after->tcbSchedContext)->rTime) {
        before = after;
        after = after->tcbSchedNext;
    }

    if (before == NULL) {
        /* insert at head */
        NODE_STATE_ON_CORE(ksReleaseHead, tcb->tcbAffinity) = tcb;
        /* new head means the release timeout may need reprogramming */
        NODE_STATE_ON_CORE(ksReprogram, tcb->tcbAffinity) = true;
    } else {
        before->tcbSchedNext = tcb;
    }

    if (after != NULL) {
        after->tcbSchedPrev = tcb;
    }

    tcb->tcbSchedNext = after;
    tcb->tcbSchedPrev = before;

    thread_state_ptr_set_tcbInReleaseQueue(&tcb->tcbState, true);
}
313
/* Detach and return the head of the current core's release queue.
 * Precondition: the queue is non-empty. Always requests a timer
 * reprogram, since the head (and thus the next release time) changed. */
tcb_t *tcbReleaseDequeue(void)
{
    assert(NODE_STATE(ksReleaseHead) != NULL);
    assert(NODE_STATE(ksReleaseHead)->tcbSchedPrev == NULL);
    SMP_COND_STATEMENT(assert(NODE_STATE(ksReleaseHead)->tcbAffinity == getCurrentCPUIndex()));

    tcb_t *detached_head = NODE_STATE(ksReleaseHead);
    NODE_STATE(ksReleaseHead) = NODE_STATE(ksReleaseHead)->tcbSchedNext;

    if (NODE_STATE(ksReleaseHead)) {
        NODE_STATE(ksReleaseHead)->tcbSchedPrev = NULL;
    }

    /* fully unlink the detached head from its successor */
    if (detached_head->tcbSchedNext) {
        detached_head->tcbSchedNext->tcbSchedPrev = NULL;
        detached_head->tcbSchedNext = NULL;
    }

    thread_state_ptr_set_tcbInReleaseQueue(&detached_head->tcbState, false);
    NODE_STATE(ksReprogram) = true;

    return detached_head;
}
337#endif
338
339cptr_t PURE getExtraCPtr(word_t *bufferPtr, word_t i)
340{
341    return (cptr_t)bufferPtr[seL4_MsgMaxLength + 2 + i];
342}
343
344void setExtraBadge(word_t *bufferPtr, word_t badge,
345                   word_t i)
346{
347    bufferPtr[seL4_MsgMaxLength + 2 + i] = badge;
348}
349
350#ifndef CONFIG_KERNEL_MCS
/* Install a reply cap to `sender` into `receiver`'s caller slot, blocking
 * the sender on the reply. Derives the cap from the sender's master reply
 * cap; the asserts mirror error conditions in the Haskell specification. */
void setupCallerCap(tcb_t *sender, tcb_t *receiver, bool_t canGrant)
{
    cte_t *replySlot, *callerSlot;
    cap_t masterCap UNUSED, callerCap UNUSED;

    setThreadState(sender, ThreadState_BlockedOnReply);
    replySlot = TCB_PTR_CTE_PTR(sender, tcbReply);
    masterCap = replySlot->cap;
    /* Haskell error: "Sender must have a valid master reply cap" */
    assert(cap_get_capType(masterCap) == cap_reply_cap);
    assert(cap_reply_cap_get_capReplyMaster(masterCap));
    assert(cap_reply_cap_get_capReplyCanGrant(masterCap));
    assert(TCB_PTR(cap_reply_cap_get_capTCBPtr(masterCap)) == sender);
    callerSlot = TCB_PTR_CTE_PTR(receiver, tcbCaller);
    callerCap = callerSlot->cap;
    /* Haskell error: "Caller cap must not already exist" */
    assert(cap_get_capType(callerCap) == cap_null_cap);
    /* derive a non-master reply cap with the requested grant right */
    cteInsert(cap_reply_cap_new(canGrant, false, TCB_REF(sender)),
              replySlot, callerSlot);
}
371
/* Delete the reply cap (if any) in `receiver`'s caller slot. The GHOSTUPD
 * annotation is consumed by the verification tooling and must be kept. */
void deleteCallerCap(tcb_t *receiver)
{
    cte_t *callerSlot;

    callerSlot = TCB_PTR_CTE_PTR(receiver, tcbCaller);
    /** GHOSTUPD: "(True, gs_set_assn cteDeleteOne_'proc (ucast cap_reply_cap))" */
    cteDeleteOne(callerSlot);
}
380#endif
381
/* Slots of the extra capabilities of the current message, filled in by
 * lookupExtraCaps(); the array is NULL-terminated when not full. */
extra_caps_t current_extra_caps;
383
/* Look up the extra capability slots named in `thread`'s IPC buffer and
 * store them in current_extra_caps. On a failed lookup, records a cap fault
 * for the offending cptr and returns the lookup's status. The result array
 * is NULL-terminated unless all seL4_MsgMaxExtraCaps entries are used. */
exception_t lookupExtraCaps(tcb_t *thread, word_t *bufferPtr, seL4_MessageInfo_t info)
{
    lookupSlot_raw_ret_t lu_ret;
    cptr_t cptr;
    word_t i, length;

    /* no IPC buffer: there can be no extra caps */
    if (!bufferPtr) {
        current_extra_caps.excaprefs[0] = NULL;
        return EXCEPTION_NONE;
    }

    length = seL4_MessageInfo_get_extraCaps(info);

    for (i = 0; i < length; i++) {
        cptr = getExtraCPtr(bufferPtr, i);

        lu_ret = lookupSlot(thread, cptr);
        if (lu_ret.status != EXCEPTION_NONE) {
            /* inReceivePhase = false: fault raised while sending */
            current_fault = seL4_Fault_CapFault_new(cptr, false);
            return lu_ret.status;
        }

        current_extra_caps.excaprefs[i] = lu_ret.slot;
    }
    /* terminate the array if it is not completely full */
    if (i < seL4_MsgMaxExtraCaps) {
        current_extra_caps.excaprefs[i] = NULL;
    }

    return EXCEPTION_NONE;
}
414
415/* Copy IPC MRs from one thread to another */
/* Copy IPC MRs from one thread to another */
/* Copies up to `n` message registers: the first n_msgRegisters travel in
 * CPU registers; the remainder are copied between the two IPC buffers (if
 * both exist). Returns the number of MRs actually transferred. */
word_t copyMRs(tcb_t *sender, word_t *sendBuf, tcb_t *receiver,
               word_t *recvBuf, word_t n)
{
    word_t i;

    /* Copy inline words */
    for (i = 0; i < n && i < n_msgRegisters; i++) {
        setRegister(receiver, msgRegisters[i],
                    getRegister(sender, msgRegisters[i]));
    }

    /* either buffer missing: only the in-register MRs can be transferred */
    if (!recvBuf || !sendBuf) {
        return i;
    }

    /* Copy out-of-line words; MR i lives at buffer word i + 1 (word 0
     * holds the message tag) */
    for (; i < n; i++) {
        recvBuf[i + 1] = sendBuf[i + 1];
    }

    return i;
}
438
439#ifdef ENABLE_SMP_SUPPORT
/* This checks whether the current update to a scheduler queue invalidates the
 * previous scheduling decision on the target core. If so, an
 * `irq_reschedule_ipi` is sent to that core. */
void remoteQueueUpdate(tcb_t *tcb)
{
    /* only ipi if the target is for the current domain */
    if (tcb->tcbAffinity != getCurrentCPUIndex() && tcb->tcbDomain == ksCurDomain) {
        tcb_t *targetCurThread = NODE_STATE_ON_CORE(ksCurThread, tcb->tcbAffinity);

        /* reschedule if the target core is idle or we are waking a higher priority thread (or
         * if a new irq would need to be set on MCS) */
        if (targetCurThread == NODE_STATE_ON_CORE(ksIdleThread, tcb->tcbAffinity)  ||
            tcb->tcbPriority > targetCurThread->tcbPriority
#ifdef CONFIG_KERNEL_MCS
            || NODE_STATE_ON_CORE(ksReprogram, tcb->tcbAffinity)
#endif
           ) {
            /* IPIs are batched: record the pending reschedule for the core */
            ARCH_NODE_STATE(ipiReschedulePending) |= BIT(tcb->tcbAffinity);
        }
    }
}
460
/* This makes sure the TCB is not being run on another core.
 * It requests 'IpiRemoteCall_Stall' to switch that core away from this TCB.
 * We also request the 'irq_reschedule_ipi' to restore the state of the
 * target core afterwards. */
void remoteTCBStall(tcb_t *tcb)
{

    if (
#ifdef CONFIG_KERNEL_MCS
        /* MCS: a thread without a scheduling context cannot be running */
        tcb->tcbSchedContext &&
#endif
        tcb->tcbAffinity != getCurrentCPUIndex() &&
        NODE_STATE_ON_CORE(ksCurThread, tcb->tcbAffinity) == tcb) {
        doRemoteStall(tcb->tcbAffinity);
        ARCH_NODE_STATE(ipiReschedulePending) |= BIT(tcb->tcbAffinity);
    }
}
477
478#ifndef CONFIG_KERNEL_MCS
/* Migrate `thread` to the core named by `affinity`, re-queueing it on the
 * destination core's scheduler if it is runnable. */
static exception_t invokeTCB_SetAffinity(tcb_t *thread, word_t affinity)
{
    /* remove the tcb from scheduler queue in case it is already in one
     * and add it to new queue if required */
    tcbSchedDequeue(thread);
    migrateTCB(thread, affinity);
    if (isRunnable(thread)) {
        SCHED_APPEND(thread);
    }
    /* reschedule current cpu if tcb moves itself */
    if (thread == NODE_STATE(ksCurThread)) {
        rescheduleRequired();
    }
    return EXCEPTION_NONE;
}
494
495static exception_t decodeSetAffinity(cap_t cap, word_t length, word_t *buffer)
496{
497    tcb_t *tcb;
498    word_t affinity;
499
500    if (length < 1) {
501        userError("TCB SetAffinity: Truncated message.");
502        current_syscall_error.type = seL4_TruncatedMessage;
503        return EXCEPTION_SYSCALL_ERROR;
504    }
505
506    tcb = TCB_PTR(cap_thread_cap_get_capTCBPtr(cap));
507
508    affinity = getSyscallArg(0, buffer);
509    if (affinity >= ksNumCPUs) {
510        userError("TCB SetAffinity: Requested CPU does not exist.");
511        current_syscall_error.type = seL4_IllegalOperation;
512        return EXCEPTION_SYSCALL_ERROR;
513    }
514
515    setThreadState(NODE_STATE(ksCurThread), ThreadState_Restart);
516    return invokeTCB_SetAffinity(tcb, affinity);
517}
518#endif
519#endif /* ENABLE_SMP_SUPPORT */
520
521#ifdef CONFIG_HARDWARE_DEBUG_API
522static exception_t invokeConfigureSingleStepping(word_t *buffer, tcb_t *t,
523                                                 uint16_t bp_num, word_t n_instrs)
524{
525    bool_t bp_was_consumed;
526
527    bp_was_consumed = configureSingleStepping(t, bp_num, n_instrs, false);
528    if (n_instrs == 0) {
529        unsetBreakpointUsedFlag(t, bp_num);
530        setMR(NODE_STATE(ksCurThread), buffer, 0, false);
531    } else {
532        setBreakpointUsedFlag(t, bp_num);
533        setMR(NODE_STATE(ksCurThread), buffer, 0, bp_was_consumed);
534    }
535    return EXCEPTION_NONE;
536}
537
538static exception_t decodeConfigureSingleStepping(cap_t cap, word_t *buffer)
539{
540    uint16_t bp_num;
541    word_t n_instrs;
542    tcb_t *tcb;
543    syscall_error_t syserr;
544
545    tcb = TCB_PTR(cap_thread_cap_get_capTCBPtr(cap));
546
547    bp_num = getSyscallArg(0, buffer);
548    n_instrs = getSyscallArg(1, buffer);
549
550    syserr = Arch_decodeConfigureSingleStepping(tcb, bp_num, n_instrs, false);
551    if (syserr.type != seL4_NoError) {
552        current_syscall_error = syserr;
553        return EXCEPTION_SYSCALL_ERROR;
554    }
555
556    setThreadState(NODE_STATE(ksCurThread), ThreadState_Restart);
557    return invokeConfigureSingleStepping(buffer, tcb, bp_num, n_instrs);
558}
559
560static exception_t invokeSetBreakpoint(tcb_t *tcb, uint16_t bp_num,
561                                       word_t vaddr, word_t type, word_t size, word_t rw)
562{
563    setBreakpoint(tcb, bp_num, vaddr, type, size, rw);
564    /* Signal restore_user_context() to pop the breakpoint context on return. */
565    setBreakpointUsedFlag(tcb, bp_num);
566    return EXCEPTION_NONE;
567}
568
569static exception_t decodeSetBreakpoint(cap_t cap, word_t *buffer)
570{
571    uint16_t bp_num;
572    word_t vaddr, type, size, rw;
573    tcb_t *tcb;
574    syscall_error_t error;
575
576    tcb = TCB_PTR(cap_thread_cap_get_capTCBPtr(cap));
577    bp_num = getSyscallArg(0, buffer);
578    vaddr = getSyscallArg(1, buffer);
579    type = getSyscallArg(2, buffer);
580    size = getSyscallArg(3, buffer);
581    rw = getSyscallArg(4, buffer);
582
583    /* We disallow the user to set breakpoint addresses that are in the kernel
584     * vaddr range.
585     */
586    if (vaddr >= (word_t)USER_TOP) {
587        userError("Debug: Invalid address %lx: bp addresses must be userspace "
588                  "addresses.",
589                  vaddr);
590        current_syscall_error.type = seL4_InvalidArgument;
591        current_syscall_error.invalidArgumentNumber = 1;
592        return EXCEPTION_SYSCALL_ERROR;
593    }
594
595    if (type != seL4_InstructionBreakpoint && type != seL4_DataBreakpoint) {
596        userError("Debug: Unknown breakpoint type %lx.", type);
597        current_syscall_error.type = seL4_InvalidArgument;
598        current_syscall_error.invalidArgumentNumber = 2;
599        return EXCEPTION_SYSCALL_ERROR;
600    } else if (type == seL4_InstructionBreakpoint) {
601        if (size != 0) {
602            userError("Debug: Instruction bps must have size of 0.");
603            current_syscall_error.type = seL4_InvalidArgument;
604            current_syscall_error.invalidArgumentNumber = 3;
605            return EXCEPTION_SYSCALL_ERROR;
606        }
607        if (rw != seL4_BreakOnRead) {
608            userError("Debug: Instruction bps must be break-on-read.");
609            current_syscall_error.type = seL4_InvalidArgument;
610            current_syscall_error.invalidArgumentNumber = 4;
611            return EXCEPTION_SYSCALL_ERROR;
612        }
613        if ((seL4_FirstWatchpoint == -1 || bp_num >= seL4_FirstWatchpoint)
614            && seL4_FirstBreakpoint != seL4_FirstWatchpoint) {
615            userError("Debug: Can't specify a watchpoint ID with type seL4_InstructionBreakpoint.");
616            current_syscall_error.type = seL4_InvalidArgument;
617            current_syscall_error.invalidArgumentNumber = 2;
618            return EXCEPTION_SYSCALL_ERROR;
619        }
620    } else if (type == seL4_DataBreakpoint) {
621        if (size == 0) {
622            userError("Debug: Data bps cannot have size of 0.");
623            current_syscall_error.type = seL4_InvalidArgument;
624            current_syscall_error.invalidArgumentNumber = 3;
625            return EXCEPTION_SYSCALL_ERROR;
626        }
627        if (seL4_FirstWatchpoint != -1 && bp_num < seL4_FirstWatchpoint) {
628            userError("Debug: Data watchpoints cannot specify non-data watchpoint ID.");
629            current_syscall_error.type = seL4_InvalidArgument;
630            current_syscall_error.invalidArgumentNumber = 2;
631            return EXCEPTION_SYSCALL_ERROR;
632        }
633    } else if (type == seL4_SoftwareBreakRequest) {
634        userError("Debug: Use a software breakpoint instruction to trigger a "
635                  "software breakpoint.");
636        current_syscall_error.type = seL4_InvalidArgument;
637        current_syscall_error.invalidArgumentNumber = 2;
638        return EXCEPTION_SYSCALL_ERROR;
639    }
640
641    if (rw != seL4_BreakOnRead && rw != seL4_BreakOnWrite
642        && rw != seL4_BreakOnReadWrite) {
643        userError("Debug: Unknown access-type %lu.", rw);
644        current_syscall_error.type = seL4_InvalidArgument;
645        current_syscall_error.invalidArgumentNumber = 3;
646        return EXCEPTION_SYSCALL_ERROR;
647    }
648    if (size != 0 && size != 1 && size != 2 && size != 4 && size != 8) {
649        userError("Debug: Invalid size %lu.", size);
650        current_syscall_error.type = seL4_InvalidArgument;
651        current_syscall_error.invalidArgumentNumber = 3;
652        return EXCEPTION_SYSCALL_ERROR;
653    }
654    if (size > 0 && vaddr & (size - 1)) {
655        /* Just Don't allow unaligned watchpoints. They are undefined
656         * both ARM and x86.
657         *
658         * X86: Intel manuals, vol3, 17.2.5:
659         *  "Two-byte ranges must be aligned on word boundaries; 4-byte
660         *   ranges must be aligned on doubleword boundaries"
661         *  "Unaligned data or I/O breakpoint addresses do not yield valid
662         *   results"
663         *
664         * ARM: ARMv7 manual, C11.11.44:
665         *  "A DBGWVR is programmed with a word-aligned address."
666         */
667        userError("Debug: Unaligned data watchpoint address %lx (size %lx) "
668                  "rejected.\n",
669                  vaddr, size);
670
671        current_syscall_error.type = seL4_AlignmentError;
672        return EXCEPTION_SYSCALL_ERROR;
673    }
674
675    error = Arch_decodeSetBreakpoint(tcb, bp_num, vaddr, type, size, rw);
676    if (error.type != seL4_NoError) {
677        current_syscall_error = error;
678        return EXCEPTION_SYSCALL_ERROR;
679    }
680
681    setThreadState(NODE_STATE(ksCurThread), ThreadState_Restart);
682    return invokeSetBreakpoint(tcb, bp_num,
683                               vaddr, type, size, rw);
684}
685
686static exception_t invokeGetBreakpoint(word_t *buffer, tcb_t *tcb, uint16_t bp_num)
687{
688    getBreakpoint_t res;
689
690    res = getBreakpoint(tcb, bp_num);
691    setMR(NODE_STATE(ksCurThread), buffer, 0, res.vaddr);
692    setMR(NODE_STATE(ksCurThread), buffer, 1, res.type);
693    setMR(NODE_STATE(ksCurThread), buffer, 2, res.size);
694    setMR(NODE_STATE(ksCurThread), buffer, 3, res.rw);
695    setMR(NODE_STATE(ksCurThread), buffer, 4, res.is_enabled);
696    return EXCEPTION_NONE;
697}
698
699static exception_t decodeGetBreakpoint(cap_t cap, word_t *buffer)
700{
701    tcb_t *tcb;
702    uint16_t bp_num;
703    syscall_error_t error;
704
705    tcb = TCB_PTR(cap_thread_cap_get_capTCBPtr(cap));
706    bp_num = getSyscallArg(0, buffer);
707
708    error = Arch_decodeGetBreakpoint(tcb, bp_num);
709    if (error.type != seL4_NoError) {
710        current_syscall_error = error;
711        return EXCEPTION_SYSCALL_ERROR;
712    }
713
714    setThreadState(NODE_STATE(ksCurThread), ThreadState_Restart);
715    return invokeGetBreakpoint(buffer, tcb, bp_num);
716}
717
718static exception_t invokeUnsetBreakpoint(tcb_t *tcb, uint16_t bp_num)
719{
720    /* Maintain the bitfield of in-use breakpoints. */
721    unsetBreakpoint(tcb, bp_num);
722    unsetBreakpointUsedFlag(tcb, bp_num);
723    return EXCEPTION_NONE;
724}
725
726static exception_t decodeUnsetBreakpoint(cap_t cap, word_t *buffer)
727{
728    tcb_t *tcb;
729    uint16_t bp_num;
730    syscall_error_t error;
731
732    tcb = TCB_PTR(cap_thread_cap_get_capTCBPtr(cap));
733    bp_num = getSyscallArg(0, buffer);
734
735    error = Arch_decodeUnsetBreakpoint(tcb, bp_num);
736    if (error.type != seL4_NoError) {
737        current_syscall_error = error;
738        return EXCEPTION_SYSCALL_ERROR;
739    }
740
741    setThreadState(NODE_STATE(ksCurThread), ThreadState_Restart);
742    return invokeUnsetBreakpoint(tcb, bp_num);
743}
744#endif /* CONFIG_HARDWARE_DEBUG_API */
745
746static exception_t invokeSetTLSBase(tcb_t *thread, word_t tls_base)
747{
748    setRegister(thread, TLS_BASE, tls_base);
749    if (thread == NODE_STATE(ksCurThread)) {
750        /* If this is the current thread force a reschedule to ensure that any changes
751         * to the TLS_BASE are realized */
752        rescheduleRequired();
753    }
754
755    return EXCEPTION_NONE;
756}
757
758static exception_t decodeSetTLSBase(cap_t cap, word_t length, word_t *buffer)
759{
760    word_t tls_base;
761
762    if (length < 1) {
763        userError("TCB SetTLSBase: Truncated message.");
764        current_syscall_error.type = seL4_TruncatedMessage;
765        return EXCEPTION_SYSCALL_ERROR;
766    }
767
768    tls_base = getSyscallArg(0, buffer);
769
770    setThreadState(NODE_STATE(ksCurThread), ThreadState_Restart);
771    return invokeSetTLSBase(TCB_PTR(cap_thread_cap_get_capTCBPtr(cap)), tls_base);
772}
773
774/* The following functions sit in the syscall error monad, but include the
775 * exception cases for the preemptible bottom end, as they call the invoke
776 * functions directly.  This is a significant deviation from the Haskell
777 * spec. */
/* Top-level dispatcher for TCB invocations: routes `invLabel` to the
 * appropriate second-level decoder, or invokes directly for the label-only
 * operations (Suspend/Resume). Returns seL4_IllegalOperation for unknown
 * labels. */
exception_t decodeTCBInvocation(word_t invLabel, word_t length, cap_t cap,
                                cte_t *slot, extra_caps_t excaps, bool_t call,
                                word_t *buffer)
{
    /* Stall the core if we are operating on a remote TCB that is currently running */
    SMP_COND_STATEMENT(remoteTCBStall(TCB_PTR(cap_thread_cap_get_capTCBPtr(cap)));)

    switch (invLabel) {
    case TCBReadRegisters:
        /* Second level of decoding */
        return decodeReadRegisters(cap, length, call, buffer);

    case TCBWriteRegisters:
        return decodeWriteRegisters(cap, length, buffer);

    case TCBCopyRegisters:
        return decodeCopyRegisters(cap, length, excaps, buffer);

    case TCBSuspend:
        /* Jump straight to the invoke */
        setThreadState(NODE_STATE(ksCurThread), ThreadState_Restart);
        return invokeTCB_Suspend(
                   TCB_PTR(cap_thread_cap_get_capTCBPtr(cap)));

    case TCBResume:
        setThreadState(NODE_STATE(ksCurThread), ThreadState_Restart);
        return invokeTCB_Resume(
                   TCB_PTR(cap_thread_cap_get_capTCBPtr(cap)));

    case TCBConfigure:
        return decodeTCBConfigure(cap, length, slot, excaps, buffer);

    case TCBSetPriority:
        return decodeSetPriority(cap, length, excaps, buffer);

    case TCBSetMCPriority:
        return decodeSetMCPriority(cap, length, excaps, buffer);

    case TCBSetSchedParams:
        /* MCS takes the invoking slot as well, for the sched context */
#ifdef CONFIG_KERNEL_MCS
        return decodeSetSchedParams(cap, length, slot, excaps, buffer);
#else
        return decodeSetSchedParams(cap, length, excaps, buffer);
#endif

    case TCBSetIPCBuffer:
        return decodeSetIPCBuffer(cap, length, slot, excaps, buffer);

    case TCBSetSpace:
        return decodeSetSpace(cap, length, slot, excaps, buffer);

    case TCBBindNotification:
        return decodeBindNotification(cap, excaps);

    case TCBUnbindNotification:
        return decodeUnbindNotification(cap);

#ifdef CONFIG_KERNEL_MCS
    case TCBSetTimeoutEndpoint:
        return decodeSetTimeoutEndpoint(cap, slot, excaps);
#else
#ifdef ENABLE_SMP_SUPPORT
    case TCBSetAffinity:
        return decodeSetAffinity(cap, length, buffer);
#endif /* ENABLE_SMP_SUPPORT */
#endif

        /* There is no notion of arch specific TCB invocations so this needs to go here */
#ifdef CONFIG_VTX
    case TCBSetEPTRoot:
        return decodeSetEPTRoot(cap, excaps);
#endif

#ifdef CONFIG_HARDWARE_DEBUG_API
    case TCBConfigureSingleStepping:
        return decodeConfigureSingleStepping(cap, buffer);

    case TCBSetBreakpoint:
        return decodeSetBreakpoint(cap, buffer);

    case TCBGetBreakpoint:
        return decodeGetBreakpoint(cap, buffer);

    case TCBUnsetBreakpoint:
        return decodeUnsetBreakpoint(cap, buffer);
#endif

    case TCBSetTLSBase:
        return decodeSetTLSBase(cap, length, buffer);

    default:
        /* Haskell: "throw IllegalOperation" */
        userError("TCB: Illegal operation.");
        current_syscall_error.type = seL4_IllegalOperation;
        return EXCEPTION_SYSCALL_ERROR;
    }
}
875
/* Bit positions of the flags word (syscall argument 0) of a TCB
 * CopyRegisters invocation; bits 8+ carry the arch transfer flags. */
enum CopyRegistersFlags {
    CopyRegisters_suspendSource = 0,
    CopyRegisters_resumeTarget = 1,
    CopyRegisters_transferFrame = 2,
    CopyRegisters_transferInteger = 3
};
882
883exception_t decodeCopyRegisters(cap_t cap, word_t length,
884                                extra_caps_t excaps, word_t *buffer)
885{
886    word_t transferArch;
887    tcb_t *srcTCB;
888    cap_t source_cap;
889    word_t flags;
890
891    if (length < 1 || excaps.excaprefs[0] == NULL) {
892        userError("TCB CopyRegisters: Truncated message.");
893        current_syscall_error.type = seL4_TruncatedMessage;
894        return EXCEPTION_SYSCALL_ERROR;
895    }
896
897    flags = getSyscallArg(0, buffer);
898
899    transferArch = Arch_decodeTransfer(flags >> 8);
900
901    source_cap = excaps.excaprefs[0]->cap;
902
903    if (cap_get_capType(source_cap) == cap_thread_cap) {
904        srcTCB = TCB_PTR(cap_thread_cap_get_capTCBPtr(source_cap));
905    } else {
906        userError("TCB CopyRegisters: Invalid source TCB.");
907        current_syscall_error.type = seL4_InvalidCapability;
908        current_syscall_error.invalidCapNumber = 1;
909        return EXCEPTION_SYSCALL_ERROR;
910    }
911
912    setThreadState(NODE_STATE(ksCurThread), ThreadState_Restart);
913    return invokeTCB_CopyRegisters(
914               TCB_PTR(cap_thread_cap_get_capTCBPtr(cap)), srcTCB,
915               flags & BIT(CopyRegisters_suspendSource),
916               flags & BIT(CopyRegisters_resumeTarget),
917               flags & BIT(CopyRegisters_transferFrame),
918               flags & BIT(CopyRegisters_transferInteger),
919               transferArch);
920
921}
922
/* Bit positions within the flags word of a TCB_ReadRegisters invocation. */
enum ReadRegistersFlags {
    ReadRegisters_suspend = 0 /* suspend the target thread before reading */
};
926
927exception_t decodeReadRegisters(cap_t cap, word_t length, bool_t call,
928                                word_t *buffer)
929{
930    word_t transferArch, flags, n;
931    tcb_t *thread;
932
933    if (length < 2) {
934        userError("TCB ReadRegisters: Truncated message.");
935        current_syscall_error.type = seL4_TruncatedMessage;
936        return EXCEPTION_SYSCALL_ERROR;
937    }
938
939    flags = getSyscallArg(0, buffer);
940    n     = getSyscallArg(1, buffer);
941
942    if (n < 1 || n > n_frameRegisters + n_gpRegisters) {
943        userError("TCB ReadRegisters: Attempted to read an invalid number of registers (%d).",
944                  (int)n);
945        current_syscall_error.type = seL4_RangeError;
946        current_syscall_error.rangeErrorMin = 1;
947        current_syscall_error.rangeErrorMax = n_frameRegisters +
948                                              n_gpRegisters;
949        return EXCEPTION_SYSCALL_ERROR;
950    }
951
952    transferArch = Arch_decodeTransfer(flags >> 8);
953
954    thread = TCB_PTR(cap_thread_cap_get_capTCBPtr(cap));
955    if (thread == NODE_STATE(ksCurThread)) {
956        userError("TCB ReadRegisters: Attempted to read our own registers.");
957        current_syscall_error.type = seL4_IllegalOperation;
958        return EXCEPTION_SYSCALL_ERROR;
959    }
960
961    setThreadState(NODE_STATE(ksCurThread), ThreadState_Restart);
962    return invokeTCB_ReadRegisters(
963               TCB_PTR(cap_thread_cap_get_capTCBPtr(cap)),
964               flags & BIT(ReadRegisters_suspend),
965               n, transferArch, call);
966}
967
/* Bit positions within the flags word of a TCB_WriteRegisters invocation. */
enum WriteRegistersFlags {
    WriteRegisters_resume = 0 /* resume the target thread after writing */
};
971
972exception_t decodeWriteRegisters(cap_t cap, word_t length, word_t *buffer)
973{
974    word_t flags, w;
975    word_t transferArch;
976    tcb_t *thread;
977
978    if (length < 2) {
979        userError("TCB WriteRegisters: Truncated message.");
980        current_syscall_error.type = seL4_TruncatedMessage;
981        return EXCEPTION_SYSCALL_ERROR;
982    }
983
984    flags = getSyscallArg(0, buffer);
985    w     = getSyscallArg(1, buffer);
986
987    if (length - 2 < w) {
988        userError("TCB WriteRegisters: Message too short for requested write size (%d/%d).",
989                  (int)(length - 2), (int)w);
990        current_syscall_error.type = seL4_TruncatedMessage;
991        return EXCEPTION_SYSCALL_ERROR;
992    }
993
994    transferArch = Arch_decodeTransfer(flags >> 8);
995
996    thread = TCB_PTR(cap_thread_cap_get_capTCBPtr(cap));
997    if (thread == NODE_STATE(ksCurThread)) {
998        userError("TCB WriteRegisters: Attempted to write our own registers.");
999        current_syscall_error.type = seL4_IllegalOperation;
1000        return EXCEPTION_SYSCALL_ERROR;
1001    }
1002
1003    setThreadState(NODE_STATE(ksCurThread), ThreadState_Restart);
1004    return invokeTCB_WriteRegisters(thread,
1005                                    flags & BIT(WriteRegisters_resume),
1006                                    w, transferArch, buffer);
1007}
1008
1009#ifdef CONFIG_KERNEL_MCS
1010static bool_t validFaultHandler(cap_t cap)
1011{
1012    switch (cap_get_capType(cap)) {
1013    case cap_endpoint_cap:
1014        if (!cap_endpoint_cap_get_capCanSend(cap) ||
1015            (!cap_endpoint_cap_get_capCanGrant(cap) &&
1016             !cap_endpoint_cap_get_capCanGrantReply(cap))) {
1017            current_syscall_error.type = seL4_InvalidCapability;
1018            return false;
1019        }
1020        break;
1021    case cap_null_cap:
1022        /* just has no fault endpoint */
1023        break;
1024    default:
1025        current_syscall_error.type = seL4_InvalidCapability;
1026        return false;
1027    }
1028    return true;
1029}
1030#endif
1031
/* TCBConfigure batches SetIPCBuffer and parts of SetSpace. */
/* Decode a TCB_Configure invocation: installs new CSpace and VSpace roots
 * and the IPC buffer on the target TCB. On non-MCS configurations the
 * fault endpoint cptr is also taken from the message; on MCS the fault
 * endpoint is configured separately, so one fewer argument is expected. */
exception_t decodeTCBConfigure(cap_t cap, word_t length, cte_t *slot,
                               extra_caps_t rootCaps, word_t *buffer)
{
    cte_t *bufferSlot, *cRootSlot, *vRootSlot;
    cap_t bufferCap, cRootCap, vRootCap;
    deriveCap_ret_t dc_ret;
    word_t cRootData, vRootData, bufferAddr;
#ifdef CONFIG_KERNEL_MCS
#define TCBCONFIGURE_ARGS 3
#else
#define TCBCONFIGURE_ARGS 4
#endif
    if (length < TCBCONFIGURE_ARGS || rootCaps.excaprefs[0] == NULL
        || rootCaps.excaprefs[1] == NULL
        || rootCaps.excaprefs[2] == NULL) {
        userError("TCB Configure: Truncated message.");
        current_syscall_error.type = seL4_TruncatedMessage;
        return EXCEPTION_SYSCALL_ERROR;
    }

#ifdef CONFIG_KERNEL_MCS
    cRootData     = getSyscallArg(0, buffer);
    vRootData     = getSyscallArg(1, buffer);
    bufferAddr    = getSyscallArg(2, buffer);
#else
    cptr_t faultEP       = getSyscallArg(0, buffer);
    cRootData     = getSyscallArg(1, buffer);
    vRootData     = getSyscallArg(2, buffer);
    bufferAddr    = getSyscallArg(3, buffer);
#endif

    cRootSlot  = rootCaps.excaprefs[0];
    cRootCap   = rootCaps.excaprefs[0]->cap;
    vRootSlot  = rootCaps.excaprefs[1];
    vRootCap   = rootCaps.excaprefs[1]->cap;
    bufferSlot = rootCaps.excaprefs[2];
    bufferCap  = rootCaps.excaprefs[2]->cap;

    /* A zero buffer address means "no IPC buffer"; otherwise derive the
       buffer cap and validate it as an IPC buffer frame. */
    if (bufferAddr == 0) {
        bufferSlot = NULL;
    } else {
        dc_ret = deriveCap(bufferSlot, bufferCap);
        if (dc_ret.status != EXCEPTION_NONE) {
            return dc_ret.status;
        }
        bufferCap = dc_ret.cap;

        exception_t e = checkValidIPCBuffer(bufferAddr, bufferCap);
        if (e != EXCEPTION_NONE) {
            return e;
        }
    }

    /* Refuse to modify roots whose existing caps are mid-way through a
       (preemptible) long-running delete. */
    if (slotCapLongRunningDelete(
            TCB_PTR_CTE_PTR(cap_thread_cap_get_capTCBPtr(cap), tcbCTable)) ||
        slotCapLongRunningDelete(
            TCB_PTR_CTE_PTR(cap_thread_cap_get_capTCBPtr(cap), tcbVTable))) {
        userError("TCB Configure: CSpace or VSpace currently being deleted.");
        current_syscall_error.type = seL4_IllegalOperation;
        return EXCEPTION_SYSCALL_ERROR;
    }

    /* Non-zero root data words update the guard/badge of the root caps. */
    if (cRootData != 0) {
        cRootCap = updateCapData(false, cRootData, cRootCap);
    }

    dc_ret = deriveCap(cRootSlot, cRootCap);
    if (dc_ret.status != EXCEPTION_NONE) {
        return dc_ret.status;
    }
    cRootCap = dc_ret.cap;

    if (cap_get_capType(cRootCap) != cap_cnode_cap) {
        userError("TCB Configure: CSpace cap is invalid.");
        current_syscall_error.type = seL4_IllegalOperation;
        return EXCEPTION_SYSCALL_ERROR;
    }

    if (vRootData != 0) {
        vRootCap = updateCapData(false, vRootData, vRootCap);
    }

    dc_ret = deriveCap(vRootSlot, vRootCap);
    if (dc_ret.status != EXCEPTION_NONE) {
        return dc_ret.status;
    }
    vRootCap = dc_ret.cap;

    if (!isValidVTableRoot(vRootCap)) {
        userError("TCB Configure: VSpace cap is invalid.");
        current_syscall_error.type = seL4_IllegalOperation;
        return EXCEPTION_SYSCALL_ERROR;
    }

    setThreadState(NODE_STATE(ksCurThread), ThreadState_Restart);
#ifdef CONFIG_KERNEL_MCS
    return invokeTCB_ThreadControlCaps(
               TCB_PTR(cap_thread_cap_get_capTCBPtr(cap)), slot,
               cap_null_cap_new(), NULL,
               cap_null_cap_new(), NULL,
               cRootCap, cRootSlot,
               vRootCap, vRootSlot,
               bufferAddr, bufferCap,
               bufferSlot, thread_control_caps_update_space |
               thread_control_caps_update_ipc_buffer);
#else
    return invokeTCB_ThreadControl(
               TCB_PTR(cap_thread_cap_get_capTCBPtr(cap)), slot,
               faultEP, NULL_PRIO, NULL_PRIO,
               cRootCap, cRootSlot,
               vRootCap, vRootSlot,
               bufferAddr, bufferCap,
               bufferSlot, thread_control_update_space |
               thread_control_update_ipc_buffer);
#endif
}
1149
/* Decode a TCB_SetPriority invocation.
 *
 * arg0 holds the new priority; extra cap 0 is the authority TCB whose
 * maximum controlled priority (MCP) bounds the requested value. */
exception_t decodeSetPriority(cap_t cap, word_t length, extra_caps_t excaps, word_t *buffer)
{
    if (length < 1 || excaps.excaprefs[0] == NULL) {
        userError("TCB SetPriority: Truncated message.");
        current_syscall_error.type = seL4_TruncatedMessage;
        return EXCEPTION_SYSCALL_ERROR;
    }

    prio_t newPrio = getSyscallArg(0, buffer);
    cap_t authCap = excaps.excaprefs[0]->cap;

    if (cap_get_capType(authCap) != cap_thread_cap) {
        userError("Set priority: authority cap not a TCB.");
        current_syscall_error.type = seL4_InvalidCapability;
        current_syscall_error.invalidCapNumber = 1;
        return EXCEPTION_SYSCALL_ERROR;
    }

    /* checkPrio fails (range error) when newPrio exceeds the authority's MCP. */
    tcb_t *authTCB = TCB_PTR(cap_thread_cap_get_capTCBPtr(authCap));
    exception_t status = checkPrio(newPrio, authTCB);
    if (status != EXCEPTION_NONE) {
        userError("TCB SetPriority: Requested priority %lu too high (max %lu).",
                  (unsigned long) newPrio, (unsigned long) authTCB->tcbMCP);
        return status;
    }

    setThreadState(NODE_STATE(ksCurThread), ThreadState_Restart);
    /* Only the priority field is updated; all other parameters are
       null/ignored values. */
#ifdef CONFIG_KERNEL_MCS
    return invokeTCB_ThreadControlSched(
               TCB_PTR(cap_thread_cap_get_capTCBPtr(cap)), NULL,
               cap_null_cap_new(), NULL,
               NULL_PRIO, newPrio,
               NULL, thread_control_sched_update_priority);
#else
    return invokeTCB_ThreadControl(
               TCB_PTR(cap_thread_cap_get_capTCBPtr(cap)), NULL,
               0, NULL_PRIO, newPrio,
               cap_null_cap_new(), NULL,
               cap_null_cap_new(), NULL,
               0, cap_null_cap_new(),
               NULL, thread_control_update_priority);
#endif
}
1193
/* Decode a TCB_SetMCPriority invocation.
 *
 * arg0 holds the new maximum controlled priority (MCP); extra cap 0 is
 * the authority TCB whose own MCP bounds the requested value. */
exception_t decodeSetMCPriority(cap_t cap, word_t length, extra_caps_t excaps, word_t *buffer)
{
    if (length < 1 || excaps.excaprefs[0] == NULL) {
        userError("TCB SetMCPriority: Truncated message.");
        current_syscall_error.type = seL4_TruncatedMessage;
        return EXCEPTION_SYSCALL_ERROR;
    }

    prio_t newMcp = getSyscallArg(0, buffer);
    cap_t authCap = excaps.excaprefs[0]->cap;

    if (cap_get_capType(authCap) != cap_thread_cap) {
        userError("TCB SetMCPriority: authority cap not a TCB.");
        current_syscall_error.type = seL4_InvalidCapability;
        current_syscall_error.invalidCapNumber = 1;
        return EXCEPTION_SYSCALL_ERROR;
    }

    /* checkPrio fails (range error) when newMcp exceeds the authority's MCP. */
    tcb_t *authTCB = TCB_PTR(cap_thread_cap_get_capTCBPtr(authCap));
    exception_t status = checkPrio(newMcp, authTCB);
    if (status != EXCEPTION_NONE) {
        userError("TCB SetMCPriority: Requested maximum controlled priority %lu too high (max %lu).",
                  (unsigned long) newMcp, (unsigned long) authTCB->tcbMCP);
        return status;
    }

    setThreadState(NODE_STATE(ksCurThread), ThreadState_Restart);
    /* Only the MCP field is updated; all other parameters are null/ignored. */
#ifdef CONFIG_KERNEL_MCS
    return invokeTCB_ThreadControlSched(
               TCB_PTR(cap_thread_cap_get_capTCBPtr(cap)), NULL,
               cap_null_cap_new(), NULL,
               newMcp, NULL_PRIO,
               NULL, thread_control_sched_update_mcp);
#else
    return invokeTCB_ThreadControl(
               TCB_PTR(cap_thread_cap_get_capTCBPtr(cap)), NULL,
               0, newMcp, NULL_PRIO,
               cap_null_cap_new(), NULL,
               cap_null_cap_new(), NULL,
               0, cap_null_cap_new(),
               NULL, thread_control_update_mcp);
#endif
}
1237
1238#ifdef CONFIG_KERNEL_MCS
1239exception_t decodeSetTimeoutEndpoint(cap_t cap, cte_t *slot, extra_caps_t excaps)
1240{
1241    if (excaps.excaprefs[0] == NULL) {
1242        userError("TCB SetSchedParams: Truncated message.");
1243        return EXCEPTION_SYSCALL_ERROR;
1244    }
1245
1246    cte_t *thSlot = excaps.excaprefs[0];
1247    cap_t thCap   = excaps.excaprefs[0]->cap;
1248
1249    /* timeout handler */
1250    if (!validFaultHandler(thCap)) {
1251        userError("TCB SetTimeoutEndpoint: timeout endpoint cap invalid.");
1252        current_syscall_error.invalidCapNumber = 1;
1253        return EXCEPTION_SYSCALL_ERROR;
1254    }
1255
1256    setThreadState(NODE_STATE(ksCurThread), ThreadState_Restart);
1257    return invokeTCB_ThreadControlCaps(
1258               TCB_PTR(cap_thread_cap_get_capTCBPtr(cap)), slot,
1259               cap_null_cap_new(), NULL,
1260               thCap, thSlot,
1261               cap_null_cap_new(), NULL,
1262               cap_null_cap_new(), NULL,
1263               0, cap_null_cap_new(), NULL,
1264               thread_control_caps_update_timeout);
1265}
1266#endif
1267
1268#ifdef CONFIG_KERNEL_MCS
1269exception_t decodeSetSchedParams(cap_t cap, word_t length, cte_t *slot, extra_caps_t excaps, word_t *buffer)
1270#else
1271exception_t decodeSetSchedParams(cap_t cap, word_t length, extra_caps_t excaps, word_t *buffer)
1272#endif
1273{
1274    if (length < 2 || excaps.excaprefs[0] == NULL
1275#ifdef CONFIG_KERNEL_MCS
1276        || excaps.excaprefs[1] == NULL || excaps.excaprefs[2] == NULL
1277#endif
1278       ) {
1279        userError("TCB SetSchedParams: Truncated message.");
1280        current_syscall_error.type = seL4_TruncatedMessage;
1281        return EXCEPTION_SYSCALL_ERROR;
1282    }
1283
1284    prio_t newMcp = getSyscallArg(0, buffer);
1285    prio_t newPrio = getSyscallArg(1, buffer);
1286    cap_t authCap = excaps.excaprefs[0]->cap;
1287#ifdef CONFIG_KERNEL_MCS
1288    cap_t scCap   = excaps.excaprefs[1]->cap;
1289    cte_t *fhSlot = excaps.excaprefs[2];
1290    cap_t fhCap   = excaps.excaprefs[2]->cap;
1291#endif
1292
1293    if (cap_get_capType(authCap) != cap_thread_cap) {
1294        userError("TCB SetSchedParams: authority cap not a TCB.");
1295        current_syscall_error.type = seL4_InvalidCapability;
1296        current_syscall_error.invalidCapNumber = 1;
1297        return EXCEPTION_SYSCALL_ERROR;
1298    }
1299
1300    tcb_t *authTCB = TCB_PTR(cap_thread_cap_get_capTCBPtr(authCap));
1301    exception_t status = checkPrio(newMcp, authTCB);
1302    if (status != EXCEPTION_NONE) {
1303        userError("TCB SetSchedParams: Requested maximum controlled priority %lu too high (max %lu).",
1304                  (unsigned long) newMcp, (unsigned long) authTCB->tcbMCP);
1305        return status;
1306    }
1307
1308    status = checkPrio(newPrio, authTCB);
1309    if (status != EXCEPTION_NONE) {
1310        userError("TCB SetSchedParams: Requested priority %lu too high (max %lu).",
1311                  (unsigned long) newPrio, (unsigned long) authTCB->tcbMCP);
1312        return status;
1313    }
1314
1315#ifdef CONFIG_KERNEL_MCS
1316    tcb_t *tcb = TCB_PTR(cap_thread_cap_get_capTCBPtr(cap));
1317    sched_context_t *sc = NULL;
1318    switch (cap_get_capType(scCap)) {
1319    case cap_sched_context_cap:
1320        sc = SC_PTR(cap_sched_context_cap_get_capSCPtr(scCap));
1321        if (tcb->tcbSchedContext) {
1322            userError("TCB Configure: tcb already has a scheduling context.");
1323            current_syscall_error.type = seL4_IllegalOperation;
1324            return EXCEPTION_SYSCALL_ERROR;
1325        }
1326        if (sc->scTcb) {
1327            userError("TCB Configure: sched contextext already bound.");
1328            current_syscall_error.type = seL4_IllegalOperation;
1329            return EXCEPTION_SYSCALL_ERROR;
1330        }
1331        break;
1332    case cap_null_cap:
1333        if (tcb == NODE_STATE(ksCurThread)) {
1334            userError("TCB SetSchedParams: Cannot change sched_context of current thread");
1335            current_syscall_error.type = seL4_IllegalOperation;
1336            return EXCEPTION_SYSCALL_ERROR;
1337        }
1338        break;
1339    default:
1340        userError("TCB Configure: sched context cap invalid.");
1341        current_syscall_error.type = seL4_InvalidCapability;
1342        current_syscall_error.invalidCapNumber = 2;
1343        return EXCEPTION_SYSCALL_ERROR;
1344    }
1345
1346    if (!validFaultHandler(fhCap)) {
1347        userError("TCB Configure: fault endpoint cap invalid.");
1348        current_syscall_error.type = seL4_InvalidCapability;
1349        current_syscall_error.invalidCapNumber = 3;
1350        return EXCEPTION_SYSCALL_ERROR;
1351    }
1352#endif
1353    setThreadState(NODE_STATE(ksCurThread), ThreadState_Restart);
1354#ifdef CONFIG_KERNEL_MCS
1355    return invokeTCB_ThreadControlSched(
1356               TCB_PTR(cap_thread_cap_get_capTCBPtr(cap)), slot,
1357               fhCap, fhSlot,
1358               newMcp, newPrio,
1359               sc,
1360               thread_control_sched_update_mcp |
1361               thread_control_sched_update_priority |
1362               thread_control_sched_update_sc |
1363               thread_control_sched_update_fault);
1364#else
1365    return invokeTCB_ThreadControl(
1366               TCB_PTR(cap_thread_cap_get_capTCBPtr(cap)), NULL,
1367               0, newMcp, newPrio,
1368               cap_null_cap_new(), NULL,
1369               cap_null_cap_new(), NULL,
1370               0, cap_null_cap_new(),
1371               NULL, thread_control_update_mcp |
1372               thread_control_update_priority);
1373#endif
1374}
1375
1376
/* Decode a TCB_SetIPCBuffer invocation.
 *
 * arg0 is the buffer's virtual address (0 means "no buffer"); extra cap 0
 * is the frame cap backing the buffer. */
exception_t decodeSetIPCBuffer(cap_t cap, word_t length, cte_t *slot,
                               extra_caps_t excaps, word_t *buffer)
{
    cptr_t cptr_bufferPtr;
    cap_t bufferCap;
    cte_t *bufferSlot;

    if (length < 1 || excaps.excaprefs[0] == NULL) {
        userError("TCB SetIPCBuffer: Truncated message.");
        current_syscall_error.type = seL4_TruncatedMessage;
        return EXCEPTION_SYSCALL_ERROR;
    }

    cptr_bufferPtr  = getSyscallArg(0, buffer);
    bufferSlot = excaps.excaprefs[0];
    bufferCap  = excaps.excaprefs[0]->cap;

    /* A zero address clears the buffer; otherwise derive the frame cap
       and validate it as an IPC buffer. */
    if (cptr_bufferPtr == 0) {
        bufferSlot = NULL;
    } else {
        exception_t e;
        deriveCap_ret_t dc_ret;

        dc_ret = deriveCap(bufferSlot, bufferCap);
        if (dc_ret.status != EXCEPTION_NONE) {
            return dc_ret.status;
        }
        bufferCap = dc_ret.cap;
        e = checkValidIPCBuffer(cptr_bufferPtr, bufferCap);
        if (e != EXCEPTION_NONE) {
            return e;
        }
    }

    setThreadState(NODE_STATE(ksCurThread), ThreadState_Restart);
#ifdef CONFIG_KERNEL_MCS
    return invokeTCB_ThreadControlCaps(
               TCB_PTR(cap_thread_cap_get_capTCBPtr(cap)), slot,
               cap_null_cap_new(), NULL,
               cap_null_cap_new(), NULL,
               cap_null_cap_new(), NULL,
               cap_null_cap_new(), NULL,
               cptr_bufferPtr, bufferCap,
               bufferSlot, thread_control_caps_update_ipc_buffer);
#else
    return invokeTCB_ThreadControl(
               TCB_PTR(cap_thread_cap_get_capTCBPtr(cap)), slot,
               0, NULL_PRIO, NULL_PRIO,
               cap_null_cap_new(), NULL,
               cap_null_cap_new(), NULL,
               cptr_bufferPtr, bufferCap,
               bufferSlot, thread_control_update_ipc_buffer);

#endif
}
1432
/* Number of message registers SetSpace expects: on MCS the fault endpoint
   travels as an extra cap rather than a cptr, so one fewer word is needed. */
#ifdef CONFIG_KERNEL_MCS
#define DECODE_SET_SPACE_PARAMS 2
#else
#define DECODE_SET_SPACE_PARAMS 3
#endif
/* Decode a TCB_SetSpace invocation: install new CSpace/VSpace roots and
 * the fault handler on the TCB named by cap. */
exception_t decodeSetSpace(cap_t cap, word_t length, cte_t *slot,
                           extra_caps_t excaps, word_t *buffer)
{
    word_t cRootData, vRootData;
    cte_t *cRootSlot, *vRootSlot;
    cap_t cRootCap, vRootCap;
    deriveCap_ret_t dc_ret;

    if (length < DECODE_SET_SPACE_PARAMS || excaps.excaprefs[0] == NULL
        || excaps.excaprefs[1] == NULL
#ifdef CONFIG_KERNEL_MCS
        || excaps.excaprefs[2] == NULL
#endif
       ) {
        userError("TCB SetSpace: Truncated message.");
        current_syscall_error.type = seL4_TruncatedMessage;
        return EXCEPTION_SYSCALL_ERROR;
    }

#ifdef CONFIG_KERNEL_MCS
    cRootData = getSyscallArg(0, buffer);
    vRootData = getSyscallArg(1, buffer);

    cte_t *fhSlot     = excaps.excaprefs[0];
    cap_t fhCap      = excaps.excaprefs[0]->cap;
    cRootSlot  = excaps.excaprefs[1];
    cRootCap   = excaps.excaprefs[1]->cap;
    vRootSlot  = excaps.excaprefs[2];
    vRootCap   = excaps.excaprefs[2]->cap;
#else
    cptr_t faultEP   = getSyscallArg(0, buffer);
    cRootData = getSyscallArg(1, buffer);
    vRootData = getSyscallArg(2, buffer);

    cRootSlot  = excaps.excaprefs[0];
    cRootCap   = excaps.excaprefs[0]->cap;
    vRootSlot  = excaps.excaprefs[1];
    vRootCap   = excaps.excaprefs[1]->cap;
#endif

    /* Refuse to modify roots whose existing caps are mid-way through a
       (preemptible) long-running delete. */
    if (slotCapLongRunningDelete(
            TCB_PTR_CTE_PTR(cap_thread_cap_get_capTCBPtr(cap), tcbCTable)) ||
        slotCapLongRunningDelete(
            TCB_PTR_CTE_PTR(cap_thread_cap_get_capTCBPtr(cap), tcbVTable))) {
        userError("TCB SetSpace: CSpace or VSpace currently being deleted.");
        current_syscall_error.type = seL4_IllegalOperation;
        return EXCEPTION_SYSCALL_ERROR;
    }

    /* Non-zero root data words update the guard/badge of the root caps. */
    if (cRootData != 0) {
        cRootCap = updateCapData(false, cRootData, cRootCap);
    }

    dc_ret = deriveCap(cRootSlot, cRootCap);
    if (dc_ret.status != EXCEPTION_NONE) {
        return dc_ret.status;
    }
    cRootCap = dc_ret.cap;

    if (cap_get_capType(cRootCap) != cap_cnode_cap) {
        userError("TCB SetSpace: Invalid CNode cap.");
        current_syscall_error.type = seL4_IllegalOperation;
        return EXCEPTION_SYSCALL_ERROR;
    }

    if (vRootData != 0) {
        vRootCap = updateCapData(false, vRootData, vRootCap);
    }

    dc_ret = deriveCap(vRootSlot, vRootCap);
    if (dc_ret.status != EXCEPTION_NONE) {
        return dc_ret.status;
    }
    vRootCap = dc_ret.cap;

    if (!isValidVTableRoot(vRootCap)) {
        userError("TCB SetSpace: Invalid VSpace cap.");
        current_syscall_error.type = seL4_IllegalOperation;
        return EXCEPTION_SYSCALL_ERROR;
    }

#ifdef CONFIG_KERNEL_MCS
    /* fault handler */
    if (!validFaultHandler(fhCap)) {
        userError("TCB SetSpace: fault endpoint cap invalid.");
        current_syscall_error.invalidCapNumber = 1;
        return EXCEPTION_SYSCALL_ERROR;
    }
#endif

    setThreadState(NODE_STATE(ksCurThread), ThreadState_Restart);
#ifdef CONFIG_KERNEL_MCS
    return invokeTCB_ThreadControlCaps(
               TCB_PTR(cap_thread_cap_get_capTCBPtr(cap)), slot,
               fhCap, fhSlot,
               cap_null_cap_new(), NULL,
               cRootCap, cRootSlot,
               vRootCap, vRootSlot,
               0, cap_null_cap_new(), NULL, thread_control_caps_update_space | thread_control_caps_update_fault);
#else
    return invokeTCB_ThreadControl(
               TCB_PTR(cap_thread_cap_get_capTCBPtr(cap)), slot,
               faultEP,
               NULL_PRIO, NULL_PRIO,
               cRootCap, cRootSlot,
               vRootCap, vRootSlot,
               0, cap_null_cap_new(), NULL, thread_control_update_space);
#endif
}
1547
1548exception_t decodeDomainInvocation(word_t invLabel, word_t length, extra_caps_t excaps, word_t *buffer)
1549{
1550    word_t domain;
1551    cap_t tcap;
1552
1553    if (unlikely(invLabel != DomainSetSet)) {
1554        current_syscall_error.type = seL4_IllegalOperation;
1555        return EXCEPTION_SYSCALL_ERROR;
1556    }
1557
1558    if (unlikely(length == 0)) {
1559        userError("Domain Configure: Truncated message.");
1560        current_syscall_error.type = seL4_TruncatedMessage;
1561        return EXCEPTION_SYSCALL_ERROR;
1562    } else {
1563        domain = getSyscallArg(0, buffer);
1564        if (domain >= CONFIG_NUM_DOMAINS) {
1565            userError("Domain Configure: invalid domain (%lu >= %u).",
1566                      domain, CONFIG_NUM_DOMAINS);
1567            current_syscall_error.type = seL4_InvalidArgument;
1568            current_syscall_error.invalidArgumentNumber = 0;
1569            return EXCEPTION_SYSCALL_ERROR;
1570        }
1571    }
1572
1573    if (unlikely(excaps.excaprefs[0] == NULL)) {
1574        userError("Domain Configure: Truncated message.");
1575        current_syscall_error.type = seL4_TruncatedMessage;
1576        return EXCEPTION_SYSCALL_ERROR;
1577    }
1578
1579    tcap = excaps.excaprefs[0]->cap;
1580    if (unlikely(cap_get_capType(tcap) != cap_thread_cap)) {
1581        userError("Domain Configure: thread cap required.");
1582        current_syscall_error.type = seL4_InvalidArgument;
1583        current_syscall_error.invalidArgumentNumber = 1;
1584        return EXCEPTION_SYSCALL_ERROR;
1585    }
1586
1587    setThreadState(NODE_STATE(ksCurThread), ThreadState_Restart);
1588    setDomain(TCB_PTR(cap_thread_cap_get_capTCBPtr(tcap)), domain);
1589    return EXCEPTION_NONE;
1590}
1591
1592exception_t decodeBindNotification(cap_t cap, extra_caps_t excaps)
1593{
1594    notification_t *ntfnPtr;
1595    tcb_t *tcb;
1596    cap_t ntfn_cap;
1597
1598    if (excaps.excaprefs[0] == NULL) {
1599        userError("TCB BindNotification: Truncated message.");
1600        current_syscall_error.type = seL4_TruncatedMessage;
1601        return EXCEPTION_SYSCALL_ERROR;
1602    }
1603
1604    tcb = TCB_PTR(cap_thread_cap_get_capTCBPtr(cap));
1605
1606    if (tcb->tcbBoundNotification) {
1607        userError("TCB BindNotification: TCB already has a bound notification.");
1608        current_syscall_error.type = seL4_IllegalOperation;
1609        return EXCEPTION_SYSCALL_ERROR;
1610    }
1611
1612    ntfn_cap = excaps.excaprefs[0]->cap;
1613
1614    if (cap_get_capType(ntfn_cap) == cap_notification_cap) {
1615        ntfnPtr = NTFN_PTR(cap_notification_cap_get_capNtfnPtr(ntfn_cap));
1616    } else {
1617        userError("TCB BindNotification: Notification is invalid.");
1618        current_syscall_error.type = seL4_IllegalOperation;
1619        return EXCEPTION_SYSCALL_ERROR;
1620    }
1621
1622    if (!cap_notification_cap_get_capNtfnCanReceive(ntfn_cap)) {
1623        userError("TCB BindNotification: Insufficient access rights");
1624        current_syscall_error.type = seL4_IllegalOperation;
1625        return EXCEPTION_SYSCALL_ERROR;
1626    }
1627
1628    if ((tcb_t *)notification_ptr_get_ntfnQueue_head(ntfnPtr)
1629        || (tcb_t *)notification_ptr_get_ntfnBoundTCB(ntfnPtr)) {
1630        userError("TCB BindNotification: Notification cannot be bound.");
1631        current_syscall_error.type = seL4_IllegalOperation;
1632        return EXCEPTION_SYSCALL_ERROR;
1633    }
1634
1635
1636    setThreadState(NODE_STATE(ksCurThread), ThreadState_Restart);
1637    return invokeTCB_NotificationControl(tcb, ntfnPtr);
1638}
1639
1640exception_t decodeUnbindNotification(cap_t cap)
1641{
1642    tcb_t *tcb;
1643
1644    tcb = TCB_PTR(cap_thread_cap_get_capTCBPtr(cap));
1645
1646    if (!tcb->tcbBoundNotification) {
1647        userError("TCB UnbindNotification: TCB already has no bound Notification.");
1648        current_syscall_error.type = seL4_IllegalOperation;
1649        return EXCEPTION_SYSCALL_ERROR;
1650    }
1651
1652    setThreadState(NODE_STATE(ksCurThread), ThreadState_Restart);
1653    return invokeTCB_NotificationControl(tcb, NULL);
1654}
1655
/* The following functions sit in the preemption monad and implement the
 * preemptible, non-faulting bottom end of a TCB invocation. */

/* Make `thread` unrunnable (TCB_Suspend). */
exception_t invokeTCB_Suspend(tcb_t *thread)
{
    suspend(thread);
    return EXCEPTION_NONE;
}
1663
/* Restart `thread`, making it runnable again (TCB_Resume). */
exception_t invokeTCB_Resume(tcb_t *thread)
{
    restart(thread);
    return EXCEPTION_NONE;
}
1669
1670#ifdef CONFIG_KERNEL_MCS
/* Replace the cap in TCB cnode slot `index` of `target` with newCap (from
 * srcSlot), first deleting whatever is currently there. tCap/slot name
 * the thread cap this invocation was made through: after the possibly
 * preempting delete, the insert is only performed if both the source cap
 * and the invoked thread cap still refer to the same objects. */
static inline exception_t installTCBCap(tcb_t *target, cap_t tCap, cte_t *slot,
                                        tcb_cnode_index_t index, cap_t newCap, cte_t *srcSlot)
{
    cte_t *rootSlot = TCB_PTR_CTE_PTR(target, index);
    UNUSED exception_t e = cteDelete(rootSlot, true);
    if (e != EXCEPTION_NONE) {
        return e;
    }

    /* cteDelete on a cap installed in the tcb cannot fail */
    if (sameObjectAs(newCap, srcSlot->cap) &&
        sameObjectAs(tCap, slot->cap)) {
        cteInsert(newCap, srcSlot, rootSlot);
    }
    return e;
}
1687#endif
1688
1689#ifdef CONFIG_KERNEL_MCS
/* Update the capability-related fields of `target` selected by
 * updateFlags: fault handler, timeout handler, CSpace/VSpace roots, and
 * the IPC buffer. Each installTCBCap/cteDelete is preemptible; on
 * preemption the syscall is restarted and the remaining updates are
 * performed on the next pass. `slot` is the slot holding the thread cap
 * used for this invocation (needed to re-validate after preemption). */
exception_t invokeTCB_ThreadControlCaps(tcb_t *target, cte_t *slot,
                                        cap_t fh_newCap, cte_t *fh_srcSlot,
                                        cap_t th_newCap, cte_t *th_srcSlot,
                                        cap_t cRoot_newCap, cte_t *cRoot_srcSlot,
                                        cap_t vRoot_newCap, cte_t *vRoot_srcSlot,
                                        word_t bufferAddr, cap_t bufferCap,
                                        cte_t *bufferSrcSlot,
                                        thread_control_flag_t updateFlags)
{
    exception_t e;
    /* tCap reconstructs the invoked thread cap so installTCBCap can check
       it still matches slot->cap after a preemptible delete. */
    cap_t tCap = cap_thread_cap_new((word_t)target);

    if (updateFlags & thread_control_caps_update_fault) {
        e = installTCBCap(target, tCap, slot, tcbFaultHandler, fh_newCap, fh_srcSlot);
        if (e != EXCEPTION_NONE) {
            return e;
        }

    }

    if (updateFlags & thread_control_caps_update_timeout) {
        e = installTCBCap(target, tCap, slot, tcbTimeoutHandler, th_newCap, th_srcSlot);
        if (e != EXCEPTION_NONE) {
            return e;
        }
    }

    if (updateFlags & thread_control_caps_update_space) {
        e = installTCBCap(target, tCap, slot, tcbCTable, cRoot_newCap, cRoot_srcSlot);
        if (e != EXCEPTION_NONE) {
            return e;
        }

        e = installTCBCap(target, tCap, slot, tcbVTable, vRoot_newCap, vRoot_srcSlot);
        if (e != EXCEPTION_NONE) {
            return e;
        }
    }

    if (updateFlags & thread_control_caps_update_ipc_buffer) {
        cte_t *bufferSlot;

        bufferSlot = TCB_PTR_CTE_PTR(target, tcbBuffer);
        e = cteDelete(bufferSlot, true);
        if (e != EXCEPTION_NONE) {
            return e;
        }
        target->tcbIPCBuffer = bufferAddr;

        /* bufferSrcSlot is NULL when the buffer is being cleared. */
        if (bufferSrcSlot && sameObjectAs(bufferCap, bufferSrcSlot->cap) &&
            sameObjectAs(tCap, slot->cap)) {
            cteInsert(bufferCap, bufferSrcSlot, bufferSlot);
        }

        /* Changing the current thread's buffer invalidates cached state;
           force a reschedule. */
        if (target == NODE_STATE(ksCurThread)) {
            rescheduleRequired();
        }
    }

    return EXCEPTION_NONE;
}
1751#else
/* Apply a TCB_Configure-style update to 'target' (non-MCS variant): fault
 * endpoint cptr, MCP, priority, CSpace/VSpace roots and IPC buffer, selected
 * by 'updateFlags'. 'slot' is the CTE holding the thread cap used for this
 * invocation; it guards re-validation after deletions (see below).
 *
 * Returns EXCEPTION_NONE on success, or the error from a failed cteDelete.
 * NOTE: a partial update may remain applied if a later step fails. */
exception_t invokeTCB_ThreadControl(tcb_t *target, cte_t *slot,
                                    cptr_t faultep, prio_t mcp, prio_t priority,
                                    cap_t cRoot_newCap, cte_t *cRoot_srcSlot,
                                    cap_t vRoot_newCap, cte_t *vRoot_srcSlot,
                                    word_t bufferAddr, cap_t bufferCap,
                                    cte_t *bufferSrcSlot,
                                    thread_control_flag_t updateFlags)
{
    exception_t e;
    /* Fresh thread cap for the target, compared against slot->cap below to
     * detect whether the invoked thread cap survived intervening deletes. */
    cap_t tCap = cap_thread_cap_new((word_t)target);

    /* In the non-MCS kernel the fault endpoint is a cptr stored directly in
     * the TCB; it is updated together with the space roots. */
    if (updateFlags & thread_control_update_space) {
        target->tcbFaultHandler = faultep;
    }

    if (updateFlags & thread_control_update_mcp) {
        setMCPriority(target, mcp);
    }

    /* Replace the CSpace root. */
    if (updateFlags & thread_control_update_space) {
        cte_t *rootSlot;

        rootSlot = TCB_PTR_CTE_PTR(target, tcbCTable);
        e = cteDelete(rootSlot, true);
        if (e != EXCEPTION_NONE) {
            return e;
        }
        /* The delete above may have (transitively) removed the source cap or
         * the invoked thread cap; only insert if both are still intact. */
        if (sameObjectAs(cRoot_newCap, cRoot_srcSlot->cap) &&
            sameObjectAs(tCap, slot->cap)) {
            cteInsert(cRoot_newCap, cRoot_srcSlot, rootSlot);
        }
    }

    /* Replace the VSpace root (same delete/re-validate/insert pattern). */
    if (updateFlags & thread_control_update_space) {
        cte_t *rootSlot;

        rootSlot = TCB_PTR_CTE_PTR(target, tcbVTable);
        e = cteDelete(rootSlot, true);
        if (e != EXCEPTION_NONE) {
            return e;
        }
        if (sameObjectAs(vRoot_newCap, vRoot_srcSlot->cap) &&
            sameObjectAs(tCap, slot->cap)) {
            cteInsert(vRoot_newCap, vRoot_srcSlot, rootSlot);
        }
    }

    /* Replace the IPC buffer: delete the old buffer frame cap, record the new
     * buffer address, then insert the new frame cap if one was provided. */
    if (updateFlags & thread_control_update_ipc_buffer) {
        cte_t *bufferSlot;

        bufferSlot = TCB_PTR_CTE_PTR(target, tcbBuffer);
        e = cteDelete(bufferSlot, true);
        if (e != EXCEPTION_NONE) {
            return e;
        }
        target->tcbIPCBuffer = bufferAddr;

        /* bufferSrcSlot is NULL when the buffer is being cleared rather than
         * replaced; re-validate both caps as for the space roots above. */
        if (bufferSrcSlot && sameObjectAs(bufferCap, bufferSrcSlot->cap) &&
            sameObjectAs(tCap, slot->cap)) {
            cteInsert(bufferCap, bufferSrcSlot, bufferSlot);
        }

        if (target == NODE_STATE(ksCurThread)) {
            rescheduleRequired();
        }
    }

    /* Priority is applied last, after all other state has been updated. */
    if (updateFlags & thread_control_update_priority) {
        setPriority(target, priority);
    }

    return EXCEPTION_NONE;
}
1825#endif
1826
1827#ifdef CONFIG_KERNEL_MCS
1828exception_t invokeTCB_ThreadControlSched(tcb_t *target, cte_t *slot,
1829                                         cap_t fh_newCap, cte_t *fh_srcSlot,
1830                                         prio_t mcp, prio_t priority,
1831                                         sched_context_t *sc,
1832                                         thread_control_flag_t updateFlags)
1833{
1834    if (updateFlags & thread_control_sched_update_fault) {
1835        cap_t tCap = cap_thread_cap_new((word_t)target);
1836        exception_t e = installTCBCap(target, tCap, slot, tcbFaultHandler, fh_newCap, fh_srcSlot);
1837        if (e != EXCEPTION_NONE) {
1838            return e;
1839        }
1840    }
1841
1842    if (updateFlags & thread_control_sched_update_mcp) {
1843        setMCPriority(target, mcp);
1844    }
1845
1846    if (updateFlags & thread_control_sched_update_priority) {
1847        setPriority(target, priority);
1848    }
1849
1850    if (updateFlags & thread_control_sched_update_sc) {
1851        if (sc != NULL && sc != target->tcbSchedContext) {
1852            schedContext_bindTCB(sc, target);
1853        } else if (sc == NULL && target->tcbSchedContext != NULL) {
1854            schedContext_unbindTCB(target->tcbSchedContext, target);
1855        }
1856    }
1857
1858    return EXCEPTION_NONE;
1859}
1860#endif
1861
1862exception_t invokeTCB_CopyRegisters(tcb_t *dest, tcb_t *tcb_src,
1863                                    bool_t suspendSource, bool_t resumeTarget,
1864                                    bool_t transferFrame, bool_t transferInteger,
1865                                    word_t transferArch)
1866{
1867    if (suspendSource) {
1868        suspend(tcb_src);
1869    }
1870
1871    if (resumeTarget) {
1872        restart(dest);
1873    }
1874
1875    if (transferFrame) {
1876        word_t i;
1877        word_t v;
1878        word_t pc;
1879
1880        for (i = 0; i < n_frameRegisters; i++) {
1881            v = getRegister(tcb_src, frameRegisters[i]);
1882            setRegister(dest, frameRegisters[i], v);
1883        }
1884
1885        pc = getRestartPC(dest);
1886        setNextPC(dest, pc);
1887    }
1888
1889    if (transferInteger) {
1890        word_t i;
1891        word_t v;
1892
1893        for (i = 0; i < n_gpRegisters; i++) {
1894            v = getRegister(tcb_src, gpRegisters[i]);
1895            setRegister(dest, gpRegisters[i], v);
1896        }
1897    }
1898
1899    Arch_postModifyRegisters(dest);
1900
1901    if (dest == NODE_STATE(ksCurThread)) {
1902        /* If we modified the current thread we may need to reschedule
1903         * due to changing registers are only reloaded in Arch_switchToThread */
1904        rescheduleRequired();
1905    }
1906
1907    return Arch_performTransfer(transferArch, tcb_src, dest);
1908}
1909
1910/* ReadRegisters is a special case: replyFromKernel & setMRs are
1911 * unfolded here, in order to avoid passing the large reply message up
1912 * to the top level in a global (and double-copying). We prevent the
1913 * top-level replyFromKernel_success_empty() from running by setting the
1914 * thread state. Retype does this too.
1915 */
/* Read up to 'n' registers of 'tcb_src' into the caller's message registers
 * and IPC buffer (frame registers first, then general-purpose registers).
 * Only marshals a reply when 'call' is set. See the comment above: the reply
 * is built in place here instead of via replyFromKernel/setMRs. */
exception_t invokeTCB_ReadRegisters(tcb_t *tcb_src, bool_t suspendSource,
                                    word_t n, word_t arch, bool_t call)
{
    word_t i, j;
    exception_t e;
    tcb_t *thread;

    thread = NODE_STATE(ksCurThread);

    if (suspendSource) {
        suspend(tcb_src);
    }

    e = Arch_performTransfer(arch, tcb_src, NODE_STATE(ksCurThread));
    if (e != EXCEPTION_NONE) {
        return e;
    }

    if (call) {
        word_t *ipcBuffer;

        /* May be NULL; in that case only the in-register MRs are filled. */
        ipcBuffer = lookupIPCBuffer(true, thread);

        setRegister(thread, badgeRegister, 0);

        /* Frame registers: first into the message registers... */
        for (i = 0; i < n && i < n_frameRegisters && i < n_msgRegisters; i++) {
            setRegister(thread, msgRegisters[i],
                        getRegister(tcb_src, frameRegisters[i]));
        }

        /* ...then spill the remainder into the IPC buffer (slot i+1 because
         * buffer word 0 is not an MR slot). */
        if (ipcBuffer != NULL && i < n && i < n_frameRegisters) {
            for (; i < n && i < n_frameRegisters; i++) {
                ipcBuffer[i + 1] = getRegister(tcb_src, frameRegisters[i]);
            }
        }

        /* Remember how many frame registers were transferred; 'i' is reused
         * below to count general-purpose registers. */
        j = i;

        /* GP registers into any remaining message registers... */
        for (i = 0; i < n_gpRegisters && i + n_frameRegisters < n
             && i + n_frameRegisters < n_msgRegisters; i++) {
            setRegister(thread, msgRegisters[i + n_frameRegisters],
                        getRegister(tcb_src, gpRegisters[i]));
        }

        /* ...and the rest into the IPC buffer. */
        if (ipcBuffer != NULL && i < n_gpRegisters
            && i + n_frameRegisters < n) {
            for (; i < n_gpRegisters && i + n_frameRegisters < n; i++) {
                ipcBuffer[i + n_frameRegisters + 1] =
                    getRegister(tcb_src, gpRegisters[i]);
            }
        }

        /* Message length = frame registers (j) + GP registers (i) copied. */
        setRegister(thread, msgInfoRegister, wordFromMessageInfo(
                        seL4_MessageInfo_new(0, 0, 0, i + j)));
    }
    /* Running state suppresses the generic replyFromKernel_success_empty()
     * at the top level (see the comment preceding this function). */
    setThreadState(thread, ThreadState_Running);

    return EXCEPTION_NONE;
}
1975
1976exception_t invokeTCB_WriteRegisters(tcb_t *dest, bool_t resumeTarget,
1977                                     word_t n, word_t arch, word_t *buffer)
1978{
1979    word_t i;
1980    word_t pc;
1981    exception_t e;
1982    bool_t archInfo;
1983
1984    e = Arch_performTransfer(arch, NODE_STATE(ksCurThread), dest);
1985    if (e != EXCEPTION_NONE) {
1986        return e;
1987    }
1988
1989    if (n > n_frameRegisters + n_gpRegisters) {
1990        n = n_frameRegisters + n_gpRegisters;
1991    }
1992
1993    archInfo = Arch_getSanitiseRegisterInfo(dest);
1994
1995    for (i = 0; i < n_frameRegisters && i < n; i++) {
1996        /* Offset of 2 to get past the initial syscall arguments */
1997        setRegister(dest, frameRegisters[i],
1998                    sanitiseRegister(frameRegisters[i],
1999                                     getSyscallArg(i + 2, buffer), archInfo));
2000    }
2001
2002    for (i = 0; i < n_gpRegisters && i + n_frameRegisters < n; i++) {
2003        setRegister(dest, gpRegisters[i],
2004                    sanitiseRegister(gpRegisters[i],
2005                                     getSyscallArg(i + n_frameRegisters + 2,
2006                                                   buffer), archInfo));
2007    }
2008
2009    pc = getRestartPC(dest);
2010    setNextPC(dest, pc);
2011
2012    Arch_postModifyRegisters(dest);
2013
2014    if (resumeTarget) {
2015        restart(dest);
2016    }
2017
2018    if (dest == NODE_STATE(ksCurThread)) {
2019        /* If we modified the current thread we may need to reschedule
2020         * due to changing registers are only reloaded in Arch_switchToThread */
2021        rescheduleRequired();
2022    }
2023
2024    return EXCEPTION_NONE;
2025}
2026
2027exception_t invokeTCB_NotificationControl(tcb_t *tcb, notification_t *ntfnPtr)
2028{
2029    if (ntfnPtr) {
2030        bindNotification(tcb, ntfnPtr);
2031    } else {
2032        unbindNotification(tcb);
2033    }
2034
2035    return EXCEPTION_NONE;
2036}
2037
2038#ifdef CONFIG_DEBUG_BUILD
2039void setThreadName(tcb_t *tcb, const char *name)
2040{
2041    strlcpy(TCB_PTR_DEBUG_PTR(tcb)->tcbName, name, TCB_NAME_LENGTH);
2042}
2043#endif
2044
2045word_t setMRs_syscall_error(tcb_t *thread, word_t *receiveIPCBuffer)
2046{
2047    switch (current_syscall_error.type) {
2048    case seL4_InvalidArgument:
2049        return setMR(thread, receiveIPCBuffer, 0,
2050                     current_syscall_error.invalidArgumentNumber);
2051
2052    case seL4_InvalidCapability:
2053        return setMR(thread, receiveIPCBuffer, 0,
2054                     current_syscall_error.invalidCapNumber);
2055
2056    case seL4_IllegalOperation:
2057        return 0;
2058
2059    case seL4_RangeError:
2060        setMR(thread, receiveIPCBuffer, 0,
2061              current_syscall_error.rangeErrorMin);
2062        return setMR(thread, receiveIPCBuffer, 1,
2063                     current_syscall_error.rangeErrorMax);
2064
2065    case seL4_AlignmentError:
2066        return 0;
2067
2068    case seL4_FailedLookup:
2069        setMR(thread, receiveIPCBuffer, 0,
2070              current_syscall_error.failedLookupWasSource ? 1 : 0);
2071        return setMRs_lookup_failure(thread, receiveIPCBuffer,
2072                                     current_lookup_fault, 1);
2073
2074    case seL4_TruncatedMessage:
2075    case seL4_DeleteFirst:
2076    case seL4_RevokeFirst:
2077        return 0;
2078    case seL4_NotEnoughMemory:
2079        return setMR(thread, receiveIPCBuffer, 0,
2080                     current_syscall_error.memoryLeft);
2081    default:
2082        fail("Invalid syscall error");
2083    }
2084}
2085