/*
 * Copyright 2014, General Dynamics C4 Systems
 *
 * SPDX-License-Identifier: GPL-2.0-only
 */

#include <config.h>
#include <object.h>
#include <util.h>
#include <api/faults.h>
#include <api/types.h>
#include <kernel/cspace.h>
#include <kernel/thread.h>
#include <kernel/vspace.h>
#ifdef CONFIG_KERNEL_MCS
#include <object/schedcontext.h>
#endif
#include <model/statedata.h>
#include <arch/machine.h>
#include <arch/kernel/thread.h>
#include <machine/registerset.h>
#include <linker.h>

static seL4_MessageInfo_t
transferCaps(seL4_MessageInfo_t info, extra_caps_t caps,
             endpoint_t *endpoint, tcb_t *receiver,
             word_t *receiveBuffer);

BOOT_CODE void configureIdleThread(tcb_t *tcb)
{
    Arch_configureIdleThread(tcb);
    setThreadState(tcb, ThreadState_IdleThreadState);
}

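/* Make the current thread ready to resume execution: complete any pending
 * scheduling-context yield (MCS), restore a restarted thread's program
 * counter to its restart address, or activate the idle thread. Any other
 * thread state here is a kernel bug. */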
void activateThread(void)
{
#ifdef CONFIG_KERNEL_MCS
    if (unlikely(NODE_STATE(ksCurThread)->tcbYieldTo)) {
        schedContext_completeYieldTo(NODE_STATE(ksCurThread));
        assert(thread_state_get_tsType(NODE_STATE(ksCurThread)->tcbState) == ThreadState_Running);
    }
#endif

    switch (thread_state_get_tsType(NODE_STATE(ksCurThread)->tcbState)) {
    case ThreadState_Running:
#ifdef CONFIG_VTX
    case ThreadState_RunningVM:
#endif
        break;

    case ThreadState_Restart: {
        word_t pc;

        pc = getRestartPC(NODE_STATE(ksCurThread));
        setNextPC(NODE_STATE(ksCurThread), pc);
        setThreadState(NODE_STATE(ksCurThread), ThreadState_Running);
        break;
    }

    case ThreadState_IdleThreadState:
        Arch_activateIdleThread(NODE_STATE(ksCurThread));
        break;

    default:
        fail("Current thread is blocked");
    }
}

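/* Take a thread out of action: cancel any IPC it is blocked on, make it
 * inactive and remove it from the ready queues (and, under MCS, from the
 * release queue and any yield-to relationship). */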
void suspend(tcb_t *target)
{
    cancelIPC(target);
    if (thread_state_get_tsType(target->tcbState) == ThreadState_Running) {
        /* While a thread is in the running state it is possible that its
         * restart PC is incorrect. As we do not know what state this thread
         * will transition to after we make it inactive, we update its restart
         * PC so that the thread next runs at the correct address, whether it
         * is restarted or moved directly to running. */
        updateRestartPC(target);
    }
    setThreadState(target, ThreadState_Inactive);
    tcbSchedDequeue(target);
#ifdef CONFIG_KERNEL_MCS
    tcbReleaseRemove(target);
    schedContext_cancelYieldTo(target);
#endif
}

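/* Resume a stopped thread: cancel any IPC it was blocked on, return it to the
 * Restart state so it re-runs from its restart PC, and make it schedulable
 * again. Does nothing if the thread is not stopped. */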
void restart(tcb_t *target)
{
    if (isStopped(target)) {
        cancelIPC(target);
#ifdef CONFIG_KERNEL_MCS
        setThreadState(target, ThreadState_Restart);
        schedContext_resume(target->tcbSchedContext);
        if (isSchedulable(target)) {
            possibleSwitchTo(target);
        }
#else
        setupReplyMaster(target);
        setThreadState(target, ThreadState_Restart);
        SCHED_ENQUEUE(target);
        possibleSwitchTo(target);
#endif
    }
}

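/* Transfer a message from sender to receiver across an endpoint: either a
 * normal message transfer or, if the sender has a pending fault, a fault
 * message describing that fault. */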
void doIPCTransfer(tcb_t *sender, endpoint_t *endpoint, word_t badge,
                   bool_t grant, tcb_t *receiver)
{
    void *receiveBuffer, *sendBuffer;

    receiveBuffer = lookupIPCBuffer(true, receiver);

    if (likely(seL4_Fault_get_seL4_FaultType(sender->tcbFault) == seL4_Fault_NullFault)) {
        sendBuffer = lookupIPCBuffer(false, sender);
        doNormalTransfer(sender, sendBuffer, endpoint, badge, grant,
                         receiver, receiveBuffer);
    } else {
        doFaultTransfer(badge, sender, receiver, receiveBuffer);
    }
}

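/* Deliver a reply from sender to the thread blocked on the reply object (MCS)
 * or reply capability slot (non-MCS). A normal reply resumes the receiver
 * directly; a reply to a faulting thread goes through handleFaultReply, which
 * decides whether the receiver restarts or becomes inactive. */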
#ifdef CONFIG_KERNEL_MCS
void doReplyTransfer(tcb_t *sender, reply_t *reply, bool_t grant)
#else
void doReplyTransfer(tcb_t *sender, tcb_t *receiver, cte_t *slot, bool_t grant)
#endif
{
#ifdef CONFIG_KERNEL_MCS
    if (reply->replyTCB == NULL ||
        thread_state_get_tsType(reply->replyTCB->tcbState) != ThreadState_BlockedOnReply) {
        /* nothing to do */
        return;
    }

    tcb_t *receiver = reply->replyTCB;
    reply_remove(reply, receiver);
    assert(thread_state_get_replyObject(receiver->tcbState) == REPLY_REF(0));
    assert(reply->replyTCB == NULL);
#else
    assert(thread_state_get_tsType(receiver->tcbState) ==
           ThreadState_BlockedOnReply);
#endif

    word_t fault_type = seL4_Fault_get_seL4_FaultType(receiver->tcbFault);
    if (likely(fault_type == seL4_Fault_NullFault)) {
        doIPCTransfer(sender, NULL, 0, grant, receiver);
#ifdef CONFIG_KERNEL_MCS
        setThreadState(receiver, ThreadState_Running);
#else
        /** GHOSTUPD: "(True, gs_set_assn cteDeleteOne_'proc (ucast cap_reply_cap))" */
        cteDeleteOne(slot);
        setThreadState(receiver, ThreadState_Running);
        possibleSwitchTo(receiver);
#endif
    } else {
#ifndef CONFIG_KERNEL_MCS
        /** GHOSTUPD: "(True, gs_set_assn cteDeleteOne_'proc (ucast cap_reply_cap))" */
        cteDeleteOne(slot);
#endif
        bool_t restart = handleFaultReply(receiver, sender);
        receiver->tcbFault = seL4_Fault_NullFault_new();
        if (restart) {
            setThreadState(receiver, ThreadState_Restart);
#ifndef CONFIG_KERNEL_MCS
            possibleSwitchTo(receiver);
#endif
        } else {
            setThreadState(receiver, ThreadState_Inactive);
        }
    }

#ifdef CONFIG_KERNEL_MCS
    if (receiver->tcbSchedContext && isRunnable(receiver)) {
        if ((refill_ready(receiver->tcbSchedContext) && refill_sufficient(receiver->tcbSchedContext, 0))) {
            possibleSwitchTo(receiver);
        } else {
            if (validTimeoutHandler(receiver) && fault_type != seL4_Fault_Timeout) {
                current_fault = seL4_Fault_Timeout_new(receiver->tcbSchedContext->scBadge);
                handleTimeout(receiver);
            } else {
                postpone(receiver->tcbSchedContext);
            }
        }
    }
#endif
}

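/* Copy the message registers, and any extra capabilities the receive buffer
 * and grant rights allow, from sender to receiver, then write the resulting
 * message info and badge into the receiver's registers. */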
void doNormalTransfer(tcb_t *sender, word_t *sendBuffer, endpoint_t *endpoint,
                      word_t badge, bool_t canGrant, tcb_t *receiver,
                      word_t *receiveBuffer)
{
    word_t msgTransferred;
    seL4_MessageInfo_t tag;
    exception_t status;
    extra_caps_t caps;

    tag = messageInfoFromWord(getRegister(sender, msgInfoRegister));

    if (canGrant) {
        status = lookupExtraCaps(sender, sendBuffer, tag);
        caps = current_extra_caps;
        if (unlikely(status != EXCEPTION_NONE)) {
            caps.excaprefs[0] = NULL;
        }
    } else {
        caps = current_extra_caps;
        caps.excaprefs[0] = NULL;
    }

    msgTransferred = copyMRs(sender, sendBuffer, receiver, receiveBuffer,
                             seL4_MessageInfo_get_length(tag));

    tag = transferCaps(tag, caps, endpoint, receiver, receiveBuffer);

    tag = seL4_MessageInfo_set_length(tag, msgTransferred);
    setRegister(receiver, msgInfoRegister, wordFromMessageInfo(tag));
    setRegister(receiver, badgeRegister, badge);
}

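/* Build a fault message for the sender's pending fault in the receiver's
 * message registers and IPC buffer, using the fault type as the label. */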
void doFaultTransfer(word_t badge, tcb_t *sender, tcb_t *receiver,
                     word_t *receiverIPCBuffer)
{
    word_t sent;
    seL4_MessageInfo_t msgInfo;

    sent = setMRs_fault(sender, receiver, receiverIPCBuffer);
    msgInfo = seL4_MessageInfo_new(
                  seL4_Fault_get_seL4_FaultType(sender->tcbFault), 0, 0, sent);
    setRegister(receiver, msgInfoRegister, wordFromMessageInfo(msgInfo));
    setRegister(receiver, badgeRegister, badge);
}

/* Like getReceiveSlots, this is specialised for single-cap transfer. */
static seL4_MessageInfo_t transferCaps(seL4_MessageInfo_t info, extra_caps_t caps,
                                       endpoint_t *endpoint, tcb_t *receiver,
                                       word_t *receiveBuffer)
{
    word_t i;
    cte_t *destSlot;

    info = seL4_MessageInfo_set_extraCaps(info, 0);
    info = seL4_MessageInfo_set_capsUnwrapped(info, 0);

    if (likely(!caps.excaprefs[0] || !receiveBuffer)) {
        return info;
    }

    destSlot = getReceiveSlots(receiver, receiveBuffer);

    for (i = 0; i < seL4_MsgMaxExtraCaps && caps.excaprefs[i] != NULL; i++) {
        cte_t *slot = caps.excaprefs[i];
        cap_t cap = slot->cap;

        if (cap_get_capType(cap) == cap_endpoint_cap &&
            EP_PTR(cap_endpoint_cap_get_capEPPtr(cap)) == endpoint) {
            /* If this is a cap to the endpoint on which the message was sent,
             * only transfer the badge, not the cap. */
            setExtraBadge(receiveBuffer,
                          cap_endpoint_cap_get_capEPBadge(cap), i);

            info = seL4_MessageInfo_set_capsUnwrapped(info,
                                                      seL4_MessageInfo_get_capsUnwrapped(info) | (1 << i));

        } else {
            deriveCap_ret_t dc_ret;

            if (!destSlot) {
                break;
            }

            dc_ret = deriveCap(slot, cap);

            if (dc_ret.status != EXCEPTION_NONE) {
                break;
            }
            if (cap_get_capType(dc_ret.cap) == cap_null_cap) {
                break;
            }

            cteInsert(dc_ret.cap, slot, destSlot);

            destSlot = NULL;
        }
    }

    return seL4_MessageInfo_set_extraCaps(info, i);
}

void doNBRecvFailedTransfer(tcb_t *thread)
{
    /* Set the badge register to 0 to indicate there was no message */
    setRegister(thread, badgeRegister, 0);
}

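/* Advance to the next entry in the domain schedule, wrapping around at the
 * end, and reset the new domain's remaining time. */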
static void nextDomain(void)
{
    ksDomScheduleIdx++;
    if (ksDomScheduleIdx >= ksDomScheduleLength) {
        ksDomScheduleIdx = 0;
    }
#ifdef CONFIG_KERNEL_MCS
    NODE_STATE(ksReprogram) = true;
#endif
    ksWorkUnitsCompleted = 0;
    ksCurDomain = ksDomSchedule[ksDomScheduleIdx].domain;
#ifdef CONFIG_KERNEL_MCS
    ksDomainTime = usToTicks(ksDomSchedule[ksDomScheduleIdx].length * US_IN_MS);
#else
    ksDomainTime = ksDomSchedule[ksDomScheduleIdx].length;
#endif
}

#ifdef CONFIG_KERNEL_MCS
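/* Make the scheduling context of the thread we are switching to the current
 * one. If the context changes, unblock the incoming context's refills and
 * force a timer reprogram; if a reprogram is already pending, commit the
 * consumed time to the outgoing context first. */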
static void switchSchedContext(void)
{
    if (unlikely(NODE_STATE(ksCurSC) != NODE_STATE(ksCurThread)->tcbSchedContext) && NODE_STATE(ksCurSC)->scRefillMax) {
        NODE_STATE(ksReprogram) = true;
        refill_unblock_check(NODE_STATE(ksCurThread->tcbSchedContext));

        assert(refill_ready(NODE_STATE(ksCurThread->tcbSchedContext)));
        assert(refill_sufficient(NODE_STATE(ksCurThread->tcbSchedContext), 0));
    }

    if (NODE_STATE(ksReprogram)) {
        /* If we are reprogramming, we have acted on the new kernel time and
         * cannot roll back, so charge the current thread. */
        commitTime();
    }

    NODE_STATE(ksCurSC) = NODE_STATE(ksCurThread)->tcbSchedContext;
}
#endif

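/* If the current domain's time has run out, move to the next domain before
 * choosing the next thread to run. */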
static void scheduleChooseNewThread(void)
{
    if (ksDomainTime == 0) {
        nextDomain();
    }
    chooseThread();
}

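/* Act on ksSchedulerAction: resume the current thread, switch to a previously
 * chosen candidate (falling back to a full choice when the candidate does not
 * beat the current thread's priority), or pick a new thread outright. Under
 * MCS this also wakes released threads, switches scheduling contexts and
 * reprograms the timer when required. */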
void schedule(void)
{
#ifdef CONFIG_KERNEL_MCS
    awaken();
#endif

    if (NODE_STATE(ksSchedulerAction) != SchedulerAction_ResumeCurrentThread) {
        bool_t was_runnable;
        if (isSchedulable(NODE_STATE(ksCurThread))) {
            was_runnable = true;
            SCHED_ENQUEUE_CURRENT_TCB;
        } else {
            was_runnable = false;
        }

        if (NODE_STATE(ksSchedulerAction) == SchedulerAction_ChooseNewThread) {
            scheduleChooseNewThread();
        } else {
            tcb_t *candidate = NODE_STATE(ksSchedulerAction);
            assert(isSchedulable(candidate));
            /* Avoid checking bitmap when ksCurThread is higher prio, to
             * match fast path.
             * Don't look at ksCurThread prio when it's idle, to respect
             * information flow in non-fastpath cases. */
            bool_t fastfail =
                NODE_STATE(ksCurThread) == NODE_STATE(ksIdleThread)
                || (candidate->tcbPriority < NODE_STATE(ksCurThread)->tcbPriority);
            if (fastfail &&
                !isHighestPrio(ksCurDomain, candidate->tcbPriority)) {
                SCHED_ENQUEUE(candidate);
                /* we can't switch to the candidate directly; need to reschedule */
                NODE_STATE(ksSchedulerAction) = SchedulerAction_ChooseNewThread;
                scheduleChooseNewThread();
            } else if (was_runnable && candidate->tcbPriority == NODE_STATE(ksCurThread)->tcbPriority) {
                /* Append the candidate to the end of the scheduling queue so
                 * that the current thread, which was enqueued at the head of
                 * the queue above, gets picked during chooseNewThread. */
                SCHED_APPEND(candidate);
                NODE_STATE(ksSchedulerAction) = SchedulerAction_ChooseNewThread;
                scheduleChooseNewThread();
            } else {
                assert(candidate != NODE_STATE(ksCurThread));
                switchToThread(candidate);
            }
        }
    }
    NODE_STATE(ksSchedulerAction) = SchedulerAction_ResumeCurrentThread;
#ifdef ENABLE_SMP_SUPPORT
    doMaskReschedule(ARCH_NODE_STATE(ipiReschedulePending));
    ARCH_NODE_STATE(ipiReschedulePending) = 0;
#endif /* ENABLE_SMP_SUPPORT */

#ifdef CONFIG_KERNEL_MCS
    switchSchedContext();

    if (NODE_STATE(ksReprogram)) {
        setNextInterrupt();
        NODE_STATE(ksReprogram) = false;
    }
#endif
}

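/* Pick the highest-priority schedulable thread in the current domain and
 * switch to it, or switch to the idle thread if the ready queues are empty. */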
void chooseThread(void)
{
    word_t prio;
    word_t dom;
    tcb_t *thread;

    if (CONFIG_NUM_DOMAINS > 1) {
        dom = ksCurDomain;
    } else {
        dom = 0;
    }

    if (likely(NODE_STATE(ksReadyQueuesL1Bitmap[dom]))) {
        prio = getHighestPrio(dom);
        thread = NODE_STATE(ksReadyQueues)[ready_queues_index(dom, prio)].head;
        assert(thread);
        assert(isSchedulable(thread));
#ifdef CONFIG_KERNEL_MCS
        assert(refill_sufficient(thread->tcbSchedContext, 0));
        assert(refill_ready(thread->tcbSchedContext));
#endif
        switchToThread(thread);
    } else {
        switchToIdleThread();
    }
}

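/* Make the given thread the current thread: perform the architecture-specific
 * context switch and remove it from the ready queues. */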
void switchToThread(tcb_t *thread)
{
#ifdef CONFIG_KERNEL_MCS
    assert(thread->tcbSchedContext != NULL);
    assert(!thread_state_get_tcbInReleaseQueue(thread->tcbState));
    assert(refill_sufficient(thread->tcbSchedContext, 0));
    assert(refill_ready(thread->tcbSchedContext));
#endif

#ifdef CONFIG_BENCHMARK_TRACK_UTILISATION
    benchmark_utilisation_switch(NODE_STATE(ksCurThread), thread);
#endif
    Arch_switchToThread(thread);
    tcbSchedDequeue(thread);
    NODE_STATE(ksCurThread) = thread;
}

void switchToIdleThread(void)
{
#ifdef CONFIG_BENCHMARK_TRACK_UTILISATION
    benchmark_utilisation_switch(NODE_STATE(ksCurThread), NODE_STATE(ksIdleThread));
#endif
    Arch_switchToIdleThread();
    NODE_STATE(ksCurThread) = NODE_STATE(ksIdleThread);
}

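/* Move a thread to a new scheduling domain, re-enqueueing it if it is
 * schedulable and forcing a reschedule if it is the current thread. */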
void setDomain(tcb_t *tptr, dom_t dom)
{
    tcbSchedDequeue(tptr);
    tptr->tcbDomain = dom;
    if (isSchedulable(tptr)) {
        SCHED_ENQUEUE(tptr);
    }
    if (tptr == NODE_STATE(ksCurThread)) {
        rescheduleRequired();
    }
}

void setMCPriority(tcb_t *tptr, prio_t mcp)
{
    tptr->tcbMCP = mcp;
}
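
/* Change a thread's priority. Under MCS, a queued or currently running thread
 * is re-enqueued at its new priority and a reschedule is requested, while a
 * thread blocked on an endpoint or notification is reordered within that
 * object's queue. Without MCS, the thread is dequeued, updated and, if
 * runnable, rescheduled or offered as a switch candidate. */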
#ifdef CONFIG_KERNEL_MCS
void setPriority(tcb_t *tptr, prio_t prio)
{
    switch (thread_state_get_tsType(tptr->tcbState)) {
    case ThreadState_Running:
    case ThreadState_Restart:
        if (thread_state_get_tcbQueued(tptr->tcbState) || tptr == NODE_STATE(ksCurThread)) {
            tcbSchedDequeue(tptr);
            tptr->tcbPriority = prio;
            SCHED_ENQUEUE(tptr);
            rescheduleRequired();
        } else {
            tptr->tcbPriority = prio;
        }
        break;
    case ThreadState_BlockedOnReceive:
    case ThreadState_BlockedOnSend:
        tptr->tcbPriority = prio;
        reorderEP(EP_PTR(thread_state_get_blockingObject(tptr->tcbState)), tptr);
        break;
    case ThreadState_BlockedOnNotification:
        tptr->tcbPriority = prio;
        reorderNTFN(NTFN_PTR(thread_state_get_blockingObject(tptr->tcbState)), tptr);
        break;
    default:
        tptr->tcbPriority = prio;
        break;
    }
}
#else
void setPriority(tcb_t *tptr, prio_t prio)
{
    tcbSchedDequeue(tptr);
    tptr->tcbPriority = prio;
    if (isRunnable(tptr)) {
        if (tptr == NODE_STATE(ksCurThread)) {
            rescheduleRequired();
        } else {
            possibleSwitchTo(tptr);
        }
    }
}
#endif

/* Note that this thread will possibly continue at the end of this kernel
 * entry. Do not queue it yet, since a queue+unqueue operation is wasteful
 * if it will be picked. Instead, it waits in the 'ksSchedulerAction' site
 * on which the scheduler will take action. */
void possibleSwitchTo(tcb_t *target)
{
#ifdef CONFIG_KERNEL_MCS
    if (target->tcbSchedContext != NULL && !thread_state_get_tcbInReleaseQueue(target->tcbState)) {
#endif
        if (ksCurDomain != target->tcbDomain
            SMP_COND_STATEMENT( || target->tcbAffinity != getCurrentCPUIndex())) {
            SCHED_ENQUEUE(target);
        } else if (NODE_STATE(ksSchedulerAction) != SchedulerAction_ResumeCurrentThread) {
            /* Too many threads want special treatment, use regular queues. */
            rescheduleRequired();
            SCHED_ENQUEUE(target);
        } else {
            NODE_STATE(ksSchedulerAction) = target;
        }
#ifdef CONFIG_KERNEL_MCS
    }
#endif
}

void setThreadState(tcb_t *tptr, _thread_state_t ts)
{
    thread_state_ptr_set_tsType(&tptr->tcbState, ts);
    scheduleTCB(tptr);
}

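/* If the given thread is the current thread but is no longer schedulable and
 * no other scheduling decision is pending, request a reschedule. */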
void scheduleTCB(tcb_t *tptr)
{
    if (tptr == NODE_STATE(ksCurThread) &&
        NODE_STATE(ksSchedulerAction) == SchedulerAction_ResumeCurrentThread &&
        !isSchedulable(tptr)) {
        rescheduleRequired();
    }
}

#ifdef CONFIG_KERNEL_MCS
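/* Take the scheduling context's thread off the ready queues and place it in
 * the release queue until its head refill becomes available, noting that the
 * timer on that core must be reprogrammed. */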
void postpone(sched_context_t *sc)
{
    tcbSchedDequeue(sc->scTcb);
    tcbReleaseEnqueue(sc->scTcb);
    NODE_STATE_ON_CORE(ksReprogram, sc->scCore) = true;
}

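/* Program the timer for the next point at which the kernel must run: when the
 * current thread's budget expires, when the current domain's time ends, or
 * when the head of the release queue becomes ready, whichever comes first. */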
void setNextInterrupt(void)
{
    time_t next_interrupt = NODE_STATE(ksCurTime) +
                            refill_head(NODE_STATE(ksCurThread)->tcbSchedContext)->rAmount;

    if (CONFIG_NUM_DOMAINS > 1) {
        next_interrupt = MIN(next_interrupt, NODE_STATE(ksCurTime) + ksDomainTime);
    }

    if (NODE_STATE(ksReleaseHead) != NULL) {
        next_interrupt = MIN(refill_head(NODE_STATE(ksReleaseHead)->tcbSchedContext)->rTime, next_interrupt);
    }

    setDeadline(next_interrupt - getTimerPrecision());
}

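/* Charge consumed time to the current scheduling context on the given core:
 * round-robin contexts fold their tail refill back into the head, sporadic
 * contexts go through refill_budget_check. If this is the current CPU and its
 * thread is still schedulable, end the timeslice and force a reschedule and
 * timer reprogram. */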
void chargeBudget(ticks_t consumed, bool_t canTimeoutFault, word_t core, bool_t isCurCPU)
{
    if (isRoundRobin(NODE_STATE_ON_CORE(ksCurSC, core))) {
        assert(refill_size(NODE_STATE_ON_CORE(ksCurSC, core)) == MIN_REFILLS);
        refill_head(NODE_STATE_ON_CORE(ksCurSC, core))->rAmount += refill_tail(NODE_STATE_ON_CORE(ksCurSC, core))->rAmount;
        refill_tail(NODE_STATE_ON_CORE(ksCurSC, core))->rAmount = 0;
    } else {
        refill_budget_check(consumed);
    }

    assert(refill_head(NODE_STATE_ON_CORE(ksCurSC, core))->rAmount >= MIN_BUDGET);
    NODE_STATE_ON_CORE(ksCurSC, core)->scConsumed += consumed;
    NODE_STATE_ON_CORE(ksConsumed, core) = 0;
    if (isCurCPU && likely(isSchedulable(NODE_STATE_ON_CORE(ksCurThread, core)))) {
        assert(NODE_STATE(ksCurThread)->tcbSchedContext == NODE_STATE(ksCurSC));
        endTimeslice(canTimeoutFault);
        rescheduleRequired();
        NODE_STATE(ksReprogram) = true;
    }
}

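/* Handle expiry of the current thread's budget. If a timeout fault is allowed
 * and the (non-round-robin) thread has a valid timeout handler, raise the
 * fault; otherwise append the thread to the back of its priority queue if it
 * still has budget, or postpone it until its next refill. */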
void endTimeslice(bool_t can_timeout_fault)
{
    if (can_timeout_fault && !isRoundRobin(NODE_STATE(ksCurSC)) && validTimeoutHandler(NODE_STATE(ksCurThread))) {
        current_fault = seL4_Fault_Timeout_new(NODE_STATE(ksCurSC)->scBadge);
        handleTimeout(NODE_STATE(ksCurThread));
    } else if (refill_ready(NODE_STATE(ksCurSC)) && refill_sufficient(NODE_STATE(ksCurSC), 0)) {
        /* apply round robin */
        assert(refill_sufficient(NODE_STATE(ksCurSC), 0));
        assert(!thread_state_get_tcbQueued(NODE_STATE(ksCurThread)->tcbState));
        SCHED_APPEND_CURRENT_TCB;
    } else {
        /* postpone until ready */
        postpone(NODE_STATE(ksCurSC));
    }
}
#else

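/* Non-MCS tick handler: decrement the current thread's timeslice, refilling
 * it and rotating the thread to the back of its queue when it runs out, and
 * trigger a reschedule when the current domain's time expires. */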
void timerTick(void)
{
    if (likely(thread_state_get_tsType(NODE_STATE(ksCurThread)->tcbState) ==
               ThreadState_Running)
#ifdef CONFIG_VTX
        || thread_state_get_tsType(NODE_STATE(ksCurThread)->tcbState) ==
        ThreadState_RunningVM
#endif
       ) {
        if (NODE_STATE(ksCurThread)->tcbTimeSlice > 1) {
            NODE_STATE(ksCurThread)->tcbTimeSlice--;
        } else {
            NODE_STATE(ksCurThread)->tcbTimeSlice = CONFIG_TIME_SLICE;
            SCHED_APPEND_CURRENT_TCB;
            rescheduleRequired();
        }
    }

    if (CONFIG_NUM_DOMAINS > 1) {
        ksDomainTime--;
        if (ksDomainTime == 0) {
            rescheduleRequired();
        }
    }
}
#endif

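/* Record that a full scheduling decision is needed. If a specific switch
 * candidate was pending (and, under MCS, is still schedulable), put it back
 * on the ready queues first. */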
void rescheduleRequired(void)
{
    if (NODE_STATE(ksSchedulerAction) != SchedulerAction_ResumeCurrentThread
        && NODE_STATE(ksSchedulerAction) != SchedulerAction_ChooseNewThread
#ifdef CONFIG_KERNEL_MCS
        && isSchedulable(NODE_STATE(ksSchedulerAction))
#endif
       ) {
#ifdef CONFIG_KERNEL_MCS
        assert(refill_sufficient(NODE_STATE(ksSchedulerAction)->tcbSchedContext, 0));
        assert(refill_ready(NODE_STATE(ksSchedulerAction)->tcbSchedContext));
#endif
        SCHED_ENQUEUE(NODE_STATE(ksSchedulerAction));
    }
    NODE_STATE(ksSchedulerAction) = SchedulerAction_ChooseNewThread;
}

#ifdef CONFIG_KERNEL_MCS
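/* Wake every thread at the head of the release queue whose scheduling context
 * now has a ready refill, making each a possible switch candidate and marking
 * the timer for reprogramming. */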
void awaken(void)
{
    while (unlikely(NODE_STATE(ksReleaseHead) != NULL && refill_ready(NODE_STATE(ksReleaseHead)->tcbSchedContext))) {
        tcb_t *awakened = tcbReleaseDequeue();
        /* the currently running thread cannot have just woken up */
        assert(awakened != NODE_STATE(ksCurThread));
        /* round robin threads should not be in the release queue */
        assert(!isRoundRobin(awakened->tcbSchedContext));
        /* threads should wake up on the correct core */
        SMP_COND_STATEMENT(assert(awakened->tcbAffinity == getCurrentCPUIndex()));
        /* the awakened thread's head refill should always be sufficient (at least MIN_BUDGET) */
        assert(refill_sufficient(awakened->tcbSchedContext, 0));
        possibleSwitchTo(awakened);
        /* changed head of release queue -> need to reprogram */
        NODE_STATE(ksReprogram) = true;
    }
}
#endif