/*
 * Copyright 2014, General Dynamics C4 Systems
 *
 * SPDX-License-Identifier: GPL-2.0-only
 */

#pragma once

#include <types.h>
#include <util.h>
#include <object/structures.h>
#include <arch/machine.h>
#ifdef CONFIG_KERNEL_MCS
#include <kernel/sporadic.h>
#include <machine/timer.h>
#include <mode/machine.h>
#endif

static inline CONST word_t ready_queues_index(word_t dom, word_t prio)
{
    if (CONFIG_NUM_DOMAINS > 1) {
        return dom * CONFIG_NUM_PRIORITIES + prio;
    } else {
        assert(dom == 0);
        return prio;
    }
}
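
/* For illustration only (the concrete values are assumptions, not kernel
 * constants): with CONFIG_NUM_DOMAINS == 16 and CONFIG_NUM_PRIORITIES == 256,
 * the ready queues form one flat array of 16 * 256 slots, so e.g. dom == 2,
 * prio == 100 maps to index 2 * 256 + 100 == 612. */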

static inline CONST word_t prio_to_l1index(word_t prio)
{
    return (prio >> wordRadix);
}

static inline CONST word_t l1index_to_prio(word_t l1index)
{
    return (l1index << wordRadix);
}

static inline bool_t PURE isRunnable(const tcb_t *thread)
{
    switch (thread_state_get_tsType(thread->tcbState)) {
    case ThreadState_Running:
    case ThreadState_Restart:
#ifdef CONFIG_VTX
    case ThreadState_RunningVM:
#endif
        return true;

    default:
        return false;
    }
}

static inline CONST word_t invert_l1index(word_t l1index)
{
    word_t inverted = (L2_BITMAP_SIZE - 1 - l1index);
    assert(inverted < L2_BITMAP_SIZE);
    return inverted;
}
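
/* Note: invert_l1index is its own inverse, so the same function converts in
 * both directions (e.g. with L2_BITMAP_SIZE == 4, indices 0..3 map to 3..0).
 * The inversion is presumably intended to keep the L2 words for the highest
 * priorities at the start of the array, near the L1 bitmap, improving cache
 * locality on the hot lookup path. */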

static inline prio_t getHighestPrio(word_t dom)
{
    word_t l1index;
    word_t l2index;
    word_t l1index_inverted;

    /* it's undefined to call clzl on 0 */
    assert(NODE_STATE(ksReadyQueuesL1Bitmap)[dom] != 0);

    l1index = wordBits - 1 - clzl(NODE_STATE(ksReadyQueuesL1Bitmap)[dom]);
    l1index_inverted = invert_l1index(l1index);
    assert(NODE_STATE(ksReadyQueuesL2Bitmap)[dom][l1index_inverted] != 0);
    l2index = wordBits - 1 - clzl(NODE_STATE(ksReadyQueuesL2Bitmap)[dom][l1index_inverted]);
    return (l1index_to_prio(l1index) | l2index);
}
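
/* Worked example (assuming a 64-bit configuration with wordRadix == 6): if
 * the highest runnable priority in the domain is 200, the L1 bitmap's highest
 * set bit is 200 >> 6 == 3, the highest set bit of the corresponding
 * (inverted) L2 word is 200 & 0x3f == 8, and the function returns
 * (3 << 6) | 8 == 200. */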

static inline bool_t isHighestPrio(word_t dom, prio_t prio)
{
    return NODE_STATE(ksReadyQueuesL1Bitmap)[dom] == 0 ||
           prio >= getHighestPrio(dom);
}

static inline bool_t PURE isBlocked(const tcb_t *thread)
{
    switch (thread_state_get_tsType(thread->tcbState)) {
    case ThreadState_BlockedOnReceive:
    case ThreadState_BlockedOnSend:
    case ThreadState_BlockedOnNotification:
    case ThreadState_BlockedOnReply:
        return true;

    default:
        return false;
    }
}

static inline bool_t PURE isStopped(const tcb_t *thread)
{
    switch (thread_state_get_tsType(thread->tcbState)) {
    case ThreadState_Inactive:
    case ThreadState_BlockedOnReceive:
    case ThreadState_BlockedOnSend:
    case ThreadState_BlockedOnNotification:
    case ThreadState_BlockedOnReply:
        return true;

    default:
        return false;
    }
}

#ifdef CONFIG_KERNEL_MCS
static inline bool_t PURE isRoundRobin(sched_context_t *sc)
{
    return sc->scPeriod == 0;
}

static inline bool_t isCurDomainExpired(void)
{
    return CONFIG_NUM_DOMAINS > 1 &&
           ksDomainTime < (NODE_STATE(ksConsumed) + MIN_BUDGET);
}

static inline void commitTime(void)
{
    if (NODE_STATE(ksCurSC)->scRefillMax) {
        if (likely(NODE_STATE(ksConsumed) > 0)) {
            /* if this function is called, the head refill must be sufficient to
             * charge ksConsumed */
            assert(refill_sufficient(NODE_STATE(ksCurSC), NODE_STATE(ksConsumed)));
            /* and it must be ready to use */
            assert(refill_ready(NODE_STATE(ksCurSC)));

            if (isRoundRobin(NODE_STATE(ksCurSC))) {
                /* for round robin threads, there are only two refills: the head, which is what
                 * we are consuming, and the tail, which is what we have consumed */
                assert(refill_size(NODE_STATE(ksCurSC)) == MIN_REFILLS);
                refill_head(NODE_STATE(ksCurSC))->rAmount -= NODE_STATE(ksConsumed);
                refill_tail(NODE_STATE(ksCurSC))->rAmount += NODE_STATE(ksConsumed);
            } else {
                refill_split_check(NODE_STATE(ksConsumed));
            }
            assert(refill_sufficient(NODE_STATE(ksCurSC), 0));
            assert(refill_ready(NODE_STATE(ksCurSC)));
        }
        NODE_STATE(ksCurSC)->scConsumed += NODE_STATE(ksConsumed);
    }
    if (CONFIG_NUM_DOMAINS > 1) {
        assert(ksDomainTime > NODE_STATE(ksConsumed));
        assert(ksDomainTime - NODE_STATE(ksConsumed) >= MIN_BUDGET);
        if (NODE_STATE(ksConsumed) < ksDomainTime) {
            ksDomainTime -= NODE_STATE(ksConsumed);
        } else {
            ksDomainTime = 0;
        }
    }

    NODE_STATE(ksConsumed) = 0llu;
}

static inline bool_t PURE isSchedulable(const tcb_t *thread)
{
    return isRunnable(thread) &&
           thread->tcbSchedContext != NULL &&
           thread->tcbSchedContext->scRefillMax > 0 &&
           !thread_state_get_tcbInReleaseQueue(thread->tcbState);
}
#else
#define isSchedulable isRunnable
#endif

void configureIdleThread(tcb_t *tcb);
void activateThread(void);
void suspend(tcb_t *target);
void restart(tcb_t *target);
void doIPCTransfer(tcb_t *sender, endpoint_t *endpoint,
                   word_t badge, bool_t grant, tcb_t *receiver);
#ifdef CONFIG_KERNEL_MCS
void doReplyTransfer(tcb_t *sender, reply_t *reply, bool_t grant);
#else
void doReplyTransfer(tcb_t *sender, tcb_t *receiver, cte_t *slot, bool_t grant);
void timerTick(void);
#endif
void doNormalTransfer(tcb_t *sender, word_t *sendBuffer, endpoint_t *endpoint,
                      word_t badge, bool_t canGrant, tcb_t *receiver,
                      word_t *receiveBuffer);
void doFaultTransfer(word_t badge, tcb_t *sender, tcb_t *receiver,
                     word_t *receiverIPCBuffer);
void doNBRecvFailedTransfer(tcb_t *thread);
void schedule(void);
void chooseThread(void);
void switchToThread(tcb_t *thread);
void switchToIdleThread(void);
void setDomain(tcb_t *tptr, dom_t dom);
void setPriority(tcb_t *tptr, prio_t prio);
void setMCPriority(tcb_t *tptr, prio_t mcp);
void scheduleTCB(tcb_t *tptr);
void possibleSwitchTo(tcb_t *tptr);
void setThreadState(tcb_t *tptr, _thread_state_t ts);
void rescheduleRequired(void);
/* Declare that the thread's registers (in its user_context_t) have been
 * modified, so the next time it runs it should skip any 'efficient' restore
 * and instead restore all registers to their correct places. */
void Arch_postModifyRegisters(tcb_t *tptr);

/* Updates a thread's FaultIP to match its NextIP. This is used to indicate that
 * a thread has completed its fault; by updating the restart PC we ensure that if
 * the thread is restarted in the future for any reason, it resumes in such a way
 * as to not cause the fault again. */
static inline void updateRestartPC(tcb_t *tcb)
{
    setRegister(tcb, FaultIP, getRegister(tcb, NextIP));
}
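
/* Illustrative use only (faulting_thread is a hypothetical variable, not part
 * of this header): a fault-handling path that has resolved a thread's fault
 * might advance the restart PC before making the thread runnable again, e.g.
 *
 *     updateRestartPC(faulting_thread);
 *     setThreadState(faulting_thread, ThreadState_Restart);
 *
 * so that the thread resumes at the instruction after the one that faulted
 * instead of re-raising the same fault. */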

#ifdef CONFIG_KERNEL_MCS
/* End the timeslice for the current thread.
 * This will recharge the thread's timeslice and place it at the
 * end of the scheduling queue for its priority.
 */
void endTimeslice(bool_t can_timeout_fault);

/* Called when a thread has used up its head refill */
void chargeBudget(ticks_t consumed, bool_t canTimeoutFault, word_t core, bool_t isCurCPU);

/* Update the kernel's timestamp, storing it in ksCurTime.
 * The difference between the previous kernel timestamp and the one just read
 * is added to ksConsumed.
 *
 * Should be called on every kernel entry where threads can be billed.
 */
static inline void updateTimestamp(void)
{
    time_t prev = NODE_STATE(ksCurTime);
    NODE_STATE(ksCurTime) = getCurrentTime();
    NODE_STATE(ksConsumed) += (NODE_STATE(ksCurTime) - prev);
}

/* Check whether the current thread's/domain's budget has expired.
 * If it has, bill the thread, add it to the scheduler and
 * set up a reschedule.
 *
 * @return true if the thread/domain has enough budget to
 *              get through the current kernel operation.
 */
static inline bool_t checkBudget(void)
{
    /* the currently running thread must have available capacity */
    assert(refill_ready(NODE_STATE(ksCurSC)));

    ticks_t capacity = refill_capacity(NODE_STATE(ksCurSC), NODE_STATE(ksConsumed));
    /* If the budget isn't enough, the timeslice for this SC is over. For
     * round robin threads this is sufficient; for periodic threads we also
     * need to check that there is space to schedule the replenishment - if
     * the refill is full then the timeslice is also over, as the rest of the
     * budget is forfeit. */
    if (likely(capacity >= MIN_BUDGET && (isRoundRobin(NODE_STATE(ksCurSC)) ||
                                          !refill_full(NODE_STATE(ksCurSC))))) {
        if (unlikely(isCurDomainExpired())) {
            NODE_STATE(ksReprogram) = true;
            rescheduleRequired();
            return false;
        }
        return true;
    }

    chargeBudget(NODE_STATE(ksConsumed), true, CURRENT_CPU_INDEX(), true);
    return false;
}

/* Everything checkBudget does, but also sets the thread
 * state to ThreadState_Restart. To be called from kernel entries
 * where the operation should be restarted once the current thread
 * has budget again.
 */
static inline bool_t checkBudgetRestart(void)
{
    assert(isRunnable(NODE_STATE(ksCurThread)));
    bool_t result = checkBudget();
    if (!result && isRunnable(NODE_STATE(ksCurThread))) {
        setThreadState(NODE_STATE(ksCurThread), ThreadState_Restart);
    }
    return result;
}
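
/* Illustrative only (handleInvocation stands in for whatever operation the
 * entry point performs): a billable kernel entry under MCS typically refreshes
 * the timestamp first and proceeds only if the budget check passes, e.g.
 *
 *     updateTimestamp();
 *     if (checkBudgetRestart()) {
 *         handleInvocation(...);
 *     }
 *
 * If the check fails, the current thread has been set to ThreadState_Restart,
 * so the operation is retried once the thread has budget again. */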

/* Set the next kernel tick, which is either the end of the current
 * domain's timeslice OR the end of the current thread's timeslice.
 */
void setNextInterrupt(void);

/* Wake any periodic threads that are ready for budget recharge */
void awaken(void);

/* Place the thread bound to this scheduling context in the release queue
 * of periodic threads waiting for budget recharge */
void postpone(sched_context_t *sc);
#endif