1/*
2 * Copyright 2020, Data61, CSIRO (ABN 41 687 119 230)
3 *
4 * SPDX-License-Identifier: GPL-2.0-only
5 */
6
7#include <machine/timer.h>
8#include <object/schedcontext.h>
9
10static exception_t invokeSchedContext_UnbindObject(sched_context_t *sc, cap_t cap)
11{
12    switch (cap_get_capType(cap)) {
13    case cap_thread_cap:
14        schedContext_unbindTCB(sc, sc->scTcb);
15        break;
16    case cap_notification_cap:
17        schedContext_unbindNtfn(sc);
18        break;
19    default:
20        fail("invalid cap type");
21    }
22
23    return EXCEPTION_NONE;
24}
25
26static exception_t decodeSchedContext_UnbindObject(sched_context_t *sc, extra_caps_t extraCaps)
27{
28    if (extraCaps.excaprefs[0] == NULL) {
29        userError("SchedContext_Unbind: Truncated message.");
30        current_syscall_error.type = seL4_TruncatedMessage;
31        return EXCEPTION_SYSCALL_ERROR;
32    }
33
34    cap_t cap = extraCaps.excaprefs[0]->cap;
35    switch (cap_get_capType(cap)) {
36    case cap_thread_cap:
37        if (sc->scTcb != TCB_PTR(cap_thread_cap_get_capTCBPtr(cap))) {
38            userError("SchedContext UnbindObject: object not bound");
39            current_syscall_error.type = seL4_IllegalOperation;
40            return EXCEPTION_SYSCALL_ERROR;
41        }
42        if (sc->scTcb == NODE_STATE(ksCurThread)) {
43            userError("SchedContext UnbindObject: cannot unbind sc of current thread");
44            current_syscall_error.type = seL4_IllegalOperation;
45            return EXCEPTION_SYSCALL_ERROR;
46        }
47        break;
48    case cap_notification_cap:
49        if (sc->scNotification != NTFN_PTR(cap_notification_cap_get_capNtfnPtr(cap))) {
50            userError("SchedContext UnbindObject: object not bound");
51            current_syscall_error.type = seL4_IllegalOperation;
52            return EXCEPTION_SYSCALL_ERROR;
53        }
54        break;
55
56    default:
57        userError("SchedContext_Unbind: invalid cap");
58        current_syscall_error.type = seL4_InvalidCapability;
59        current_syscall_error.invalidCapNumber = 1;
60        return EXCEPTION_SYSCALL_ERROR;
61
62    }
63
64    setThreadState(NODE_STATE(ksCurThread), ThreadState_Restart);
65    return invokeSchedContext_UnbindObject(sc, cap);
66}
67
68static exception_t invokeSchedContext_Bind(sched_context_t *sc, cap_t cap)
69{
70    switch (cap_get_capType(cap)) {
71    case cap_thread_cap:
72        schedContext_bindTCB(sc, TCB_PTR(cap_thread_cap_get_capTCBPtr(cap)));
73        break;
74    case cap_notification_cap:
75        schedContext_bindNtfn(sc, NTFN_PTR(cap_notification_cap_get_capNtfnPtr(cap)));
76        break;
77    default:
78        fail("invalid cap type");
79    }
80
81    return EXCEPTION_NONE;
82}
83
84static exception_t decodeSchedContext_Bind(sched_context_t *sc, extra_caps_t extraCaps)
85{
86    if (extraCaps.excaprefs[0] == NULL) {
87        userError("SchedContext_Bind: Truncated Message.");
88        current_syscall_error.type = seL4_TruncatedMessage;
89        return EXCEPTION_SYSCALL_ERROR;
90    }
91
92    cap_t cap = extraCaps.excaprefs[0]->cap;
93
94    if (sc->scTcb != NULL || sc->scNotification != NULL) {
95        userError("SchedContext_Bind: sched context already bound.");
96        current_syscall_error.type = seL4_IllegalOperation;
97        return EXCEPTION_SYSCALL_ERROR;
98    }
99
100    switch (cap_get_capType(cap)) {
101    case cap_thread_cap:
102        if (TCB_PTR(cap_thread_cap_get_capTCBPtr(cap))->tcbSchedContext != NULL) {
103            userError("SchedContext_Bind: tcb already bound.");
104            current_syscall_error.type = seL4_IllegalOperation;
105            return EXCEPTION_SYSCALL_ERROR;
106        }
107
108        break;
109    case cap_notification_cap:
110        if (notification_ptr_get_ntfnSchedContext(NTFN_PTR(cap_notification_cap_get_capNtfnPtr(cap)))) {
111            userError("SchedContext_Bind: notification already bound");
112            current_syscall_error.type = seL4_IllegalOperation;
113            return EXCEPTION_SYSCALL_ERROR;
114        }
115        break;
116    default:
117        userError("SchedContext_Bind: invalid cap.");
118        current_syscall_error.type = seL4_InvalidCapability;
119        current_syscall_error.invalidCapNumber = 1;
120        return EXCEPTION_SYSCALL_ERROR;
121    }
122
123    setThreadState(NODE_STATE(ksCurThread), ThreadState_Restart);
124    return invokeSchedContext_Bind(sc, cap);
125}
126
127static exception_t invokeSchedContext_Unbind(sched_context_t *sc)
128{
129    schedContext_unbindAllTCBs(sc);
130    schedContext_unbindNtfn(sc);
131    if (sc->scReply) {
132        sc->scReply->replyNext = call_stack_new(0, false);
133        sc->scReply = NULL;
134    }
135    return EXCEPTION_NONE;
136}
137
138#ifdef ENABLE_SMP_SUPPORT
139static inline void maybeStallSC(sched_context_t *sc)
140{
141    if (sc->scTcb) {
142        remoteTCBStall(sc->scTcb);
143    }
144}
145#endif
146
147static inline void setConsumed(sched_context_t *sc, word_t *buffer)
148{
149    time_t consumed = schedContext_updateConsumed(sc);
150    word_t length = mode_setTimeArg(0, consumed, buffer, NODE_STATE(ksCurThread));
151    setRegister(NODE_STATE(ksCurThread), msgInfoRegister, wordFromMessageInfo(seL4_MessageInfo_new(0, 0, 0, length)));
152}
153
154static exception_t invokeSchedContext_Consumed(sched_context_t *sc, word_t *buffer)
155{
156    setConsumed(sc, buffer);
157    return EXCEPTION_NONE;
158}
159
static exception_t invokeSchedContext_YieldTo(sched_context_t *sc, word_t *buffer)
{
    /* seL4_SchedContext_YieldTo: attempt to run the thread bound to sc in
     * preference to the current thread, reporting consumed time back to
     * the caller either now or when the caller next runs. */

    /* At most one thread may be yielding to sc at a time: finish any
     * outstanding YieldTo first, which clears sc->scYieldFrom. */
    if (sc->scYieldFrom) {
        schedContext_completeYieldTo(sc->scYieldFrom);
        assert(sc->scYieldFrom == NULL);
    }

    /* if the tcb is in the scheduler, it's ready and sufficient.
     * Otherwise, check that it is ready and sufficient and if not,
     * place the thread in the release queue. This way, from this point,
     * if the thread isSchedulable, it is ready and sufficient.*/
    schedContext_resume(sc);

    bool_t return_now = true;
    if (isSchedulable(sc->scTcb)) {
        refill_unblock_check(sc);
        /* Only donate the caller's turn when the target is on this core
         * (SMP) and has priority >= the caller's; otherwise just requeue
         * the target and return to the caller immediately. */
        if (SMP_COND_STATEMENT(sc->scCore != getCurrentCPUIndex() ||)
            sc->scTcb->tcbPriority < NODE_STATE(ksCurThread)->tcbPriority) {
            tcbSchedDequeue(sc->scTcb);
            SCHED_ENQUEUE(sc->scTcb);
        } else {
            /* Record the yield relationship in both directions, then order
             * the queue so the target runs ahead of the caller: caller is
             * enqueued first, target re-enqueued after dequeueing it. */
            NODE_STATE(ksCurThread)->tcbYieldTo = sc;
            sc->scYieldFrom = NODE_STATE(ksCurThread);
            tcbSchedDequeue(sc->scTcb);
            tcbSchedEnqueue(NODE_STATE(ksCurThread));
            tcbSchedEnqueue(sc->scTcb);
            rescheduleRequired();

            /* we are scheduling the thread associated with sc,
             * so we don't need to write to the ipc buffer
             * until the caller is scheduled again */
            return_now = false;
        }
    }

    /* Deferred case: schedContext_completeYieldTo writes the consumed time
     * when the caller is eventually rescheduled. */
    if (return_now) {
        setConsumed(sc, buffer);
    }

    return EXCEPTION_NONE;
}
201
202static exception_t decodeSchedContext_YieldTo(sched_context_t *sc, word_t *buffer)
203{
204    if (sc->scTcb == NODE_STATE(ksCurThread)) {
205        userError("SchedContext_YieldTo: cannot seL4_SchedContext_YieldTo on self");
206        current_syscall_error.type = seL4_IllegalOperation;
207        return EXCEPTION_SYSCALL_ERROR;
208    }
209
210    if (sc->scTcb == NULL) {
211        userError("SchedContext_YieldTo: cannot yield to an inactive sched context");
212        current_syscall_error.type = seL4_IllegalOperation;
213        return EXCEPTION_SYSCALL_ERROR;
214    }
215
216    if (sc->scTcb->tcbPriority > NODE_STATE(ksCurThread)->tcbMCP) {
217        userError("SchedContext_YieldTo: insufficient mcp (%lu) to yield to a thread with prio (%lu)",
218                  (unsigned long) NODE_STATE(ksCurThread)->tcbMCP, (unsigned long) sc->scTcb->tcbPriority);
219        current_syscall_error.type = seL4_IllegalOperation;
220        return EXCEPTION_SYSCALL_ERROR;
221    }
222
223    setThreadState(NODE_STATE(ksCurThread), ThreadState_Restart);
224    return invokeSchedContext_YieldTo(sc, buffer);
225}
226
227exception_t decodeSchedContextInvocation(word_t label, cap_t cap, extra_caps_t extraCaps, word_t *buffer)
228{
229    sched_context_t *sc = SC_PTR(cap_sched_context_cap_get_capSCPtr(cap));
230
231    SMP_COND_STATEMENT((maybeStallSC(sc));)
232
233    switch (label) {
234    case SchedContextConsumed:
235        /* no decode */
236        setThreadState(NODE_STATE(ksCurThread), ThreadState_Restart);
237        return invokeSchedContext_Consumed(sc, buffer);
238    case SchedContextBind:
239        return decodeSchedContext_Bind(sc, extraCaps);
240    case SchedContextUnbindObject:
241        return decodeSchedContext_UnbindObject(sc, extraCaps);
242    case SchedContextUnbind:
243        /* no decode */
244        if (sc->scTcb == NODE_STATE(ksCurThread)) {
245            userError("SchedContext UnbindObject: cannot unbind sc of current thread");
246            current_syscall_error.type = seL4_IllegalOperation;
247            return EXCEPTION_SYSCALL_ERROR;
248        }
249        setThreadState(NODE_STATE(ksCurThread), ThreadState_Restart);
250        return invokeSchedContext_Unbind(sc);
251    case SchedContextYieldTo:
252        return decodeSchedContext_YieldTo(sc, buffer);
253    default:
254        userError("SchedContext invocation: Illegal operation attempted.");
255        current_syscall_error.type = seL4_IllegalOperation;
256        return EXCEPTION_SYSCALL_ERROR;
257    }
258}
259
260void schedContext_resume(sched_context_t *sc)
261{
262    assert(!sc || sc->scTcb != NULL);
263    if (likely(sc) && isSchedulable(sc->scTcb)) {
264        if (!(refill_ready(sc) && refill_sufficient(sc, 0))) {
265            assert(!thread_state_get_tcbQueued(sc->scTcb->tcbState));
266            postpone(sc);
267        }
268    }
269}
270
/* Bind sc and tcb to each other. Both must currently be unbound. Under SMP
 * the TCB is migrated to sc's core before being resumed; if the thread is
 * then schedulable it is enqueued and a reschedule is requested. */
void schedContext_bindTCB(sched_context_t *sc, tcb_t *tcb)
{
    assert(sc->scTcb == NULL);
    assert(tcb->tcbSchedContext == NULL);

    /* Establish the two-way binding before touching scheduler state. */
    tcb->tcbSchedContext = sc;
    sc->scTcb = tcb;

    /* SMP: the thread runs on the core its scheduling context belongs to. */
    SMP_COND_STATEMENT(migrateTCB(tcb, sc->scCore));

    schedContext_resume(sc);
    if (isSchedulable(tcb)) {
        SCHED_ENQUEUE(tcb);
        rescheduleRequired();
        // TODO -- at some stage we should take this call out of any TCB invocations that
        // alter capabilities, so that we can do a direct switch. The prefernce here is to
        // remove seL4_SetSchedParams from using ThreadControl. It's currently out of scope for
        // verification work, so the work around is to use rescheduleRequired()
        //possibleSwitchTo(tcb);
    }
}
292
/* Remove the binding between sc and tcb. tcb must be the thread currently
 * bound to sc; the caller is responsible for having stalled it (SMP). */
void schedContext_unbindTCB(sched_context_t *sc, tcb_t *tcb)
{
    assert(sc->scTcb == tcb);

    /* tcb must already be stalled at this point */
    if (tcb == NODE_STATE(ksCurThread)) {
        /* The current thread is losing its sc, so a new thread must be
         * chosen at the next scheduling point. */
        rescheduleRequired();
    }

    /* Remove the thread from both the ready queue and the release queue
     * before severing the binding. */
    tcbSchedDequeue(sc->scTcb);
    tcbReleaseRemove(sc->scTcb);

    sc->scTcb->tcbSchedContext = NULL;
    sc->scTcb = NULL;
}
308
309void schedContext_unbindAllTCBs(sched_context_t *sc)
310{
311    if (sc->scTcb) {
312        SMP_COND_STATEMENT(remoteTCBStall(sc->scTcb));
313        schedContext_unbindTCB(sc, sc->scTcb);
314    }
315}
316
/* Transfer sc from its current thread (if any) to 'to'. 'to' must not
 * already have a scheduling context. Under SMP, 'to' is migrated to sc's
 * core afterwards. */
void schedContext_donate(sched_context_t *sc, tcb_t *to)
{
    assert(sc != NULL);
    assert(to != NULL);
    assert(to->tcbSchedContext == NULL);

    tcb_t *from = sc->scTcb;
    if (from) {
        /* SMP: stall the previous owner so it is not running remotely
         * while it loses its scheduling context. */
        SMP_COND_STATEMENT(remoteTCBStall(from));
        tcbSchedDequeue(from);
        from->tcbSchedContext = NULL;
        /* If the previous owner is the running thread or the thread the
         * scheduler already chose to run next, a new choice is needed. */
        if (from == NODE_STATE(ksCurThread) || from == NODE_STATE(ksSchedulerAction)) {
            rescheduleRequired();
        }
    }
    sc->scTcb = to;
    to->tcbSchedContext = sc;

    SMP_COND_STATEMENT(migrateTCB(to, sc->scCore));
}
337
338void schedContext_bindNtfn(sched_context_t *sc, notification_t *ntfn)
339{
340    notification_ptr_set_ntfnSchedContext(ntfn, SC_REF(sc));
341    sc->scNotification = ntfn;
342}
343
344void schedContext_unbindNtfn(sched_context_t *sc)
345{
346    if (sc && sc->scNotification) {
347        notification_ptr_set_ntfnSchedContext(sc->scNotification, SC_REF(0));
348        sc->scNotification = NULL;
349    }
350}
351
352time_t schedContext_updateConsumed(sched_context_t *sc)
353{
354    ticks_t consumed = sc->scConsumed;
355    if (consumed >= getMaxTicksToUs()) {
356        sc->scConsumed -= getMaxTicksToUs();
357        return getMaxTicksToUs();
358    } else {
359        sc->scConsumed = 0;
360        return ticksToUs(consumed);
361    }
362}
363
364void schedContext_cancelYieldTo(tcb_t *tcb)
365{
366    if (tcb && tcb->tcbYieldTo) {
367        tcb->tcbYieldTo->scYieldFrom = NULL;
368        tcb->tcbYieldTo = NULL;
369    }
370}
371
372void schedContext_completeYieldTo(tcb_t *yielder)
373{
374    if (yielder && yielder->tcbYieldTo) {
375        setConsumed(yielder->tcbYieldTo, lookupIPCBuffer(true, yielder));
376        schedContext_cancelYieldTo(yielder);
377    }
378}
379