/*
 * Copyright 2014, General Dynamics C4 Systems
 *
 * SPDX-License-Identifier: GPL-2.0-only
 */

#include <types.h>
#include <string.h>
#include <sel4/constants.h>
#include <kernel/thread.h>
#include <kernel/vspace.h>
#include <machine/registerset.h>
#include <model/statedata.h>
#include <object/notification.h>
#include <object/cnode.h>
#include <object/endpoint.h>
#include <object/tcb.h>

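/* Write a TCB queue's head and tail back into the endpoint object. */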
static inline void ep_ptr_set_queue(endpoint_t *epptr, tcb_queue_t queue)
{
    endpoint_ptr_set_epQueue_head(epptr, (word_t)queue.head);
    endpoint_ptr_set_epQueue_tail(epptr, (word_t)queue.end);
}

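/* Send a message to an endpoint. If no receiver is waiting (Idle or Send
 * state), a blocking sender is appended to the endpoint queue, while a
 * non-blocking send is dropped. If a receiver is waiting (Recv state),
 * the message is transferred to the head of the queue immediately.
 */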
#ifdef CONFIG_KERNEL_MCS
void sendIPC(bool_t blocking, bool_t do_call, word_t badge,
             bool_t canGrant, bool_t canGrantReply, bool_t canDonate, tcb_t *thread, endpoint_t *epptr)
#else
void sendIPC(bool_t blocking, bool_t do_call, word_t badge,
             bool_t canGrant, bool_t canGrantReply, tcb_t *thread, endpoint_t *epptr)
#endif
{
    switch (endpoint_ptr_get_state(epptr)) {
    case EPState_Idle:
    case EPState_Send:
        if (blocking) {
            tcb_queue_t queue;

            /* Set thread state to BlockedOnSend */
            thread_state_ptr_set_tsType(&thread->tcbState,
                                        ThreadState_BlockedOnSend);
            thread_state_ptr_set_blockingObject(
                &thread->tcbState, EP_REF(epptr));
            thread_state_ptr_set_blockingIPCBadge(
                &thread->tcbState, badge);
            thread_state_ptr_set_blockingIPCCanGrant(
                &thread->tcbState, canGrant);
            thread_state_ptr_set_blockingIPCCanGrantReply(
                &thread->tcbState, canGrantReply);
            thread_state_ptr_set_blockingIPCIsCall(
                &thread->tcbState, do_call);

            scheduleTCB(thread);

            /* Place calling thread in endpoint queue */
            queue = ep_ptr_get_queue(epptr);
            queue = tcbEPAppend(thread, queue);
            endpoint_ptr_set_state(epptr, EPState_Send);
            ep_ptr_set_queue(epptr, queue);
        }
        break;

    case EPState_Recv: {
        tcb_queue_t queue;
        tcb_t *dest;

        /* Get the head of the endpoint queue. */
        queue = ep_ptr_get_queue(epptr);
        dest = queue.head;

        /* Haskell error "Receive endpoint queue must not be empty" */
        assert(dest);

        /* Dequeue the first TCB */
        queue = tcbEPDequeue(dest, queue);
        ep_ptr_set_queue(epptr, queue);

        if (!queue.head) {
            endpoint_ptr_set_state(epptr, EPState_Idle);
        }

        /* Do the transfer */
        doIPCTransfer(thread, epptr, badge, canGrant, dest);

#ifdef CONFIG_KERNEL_MCS
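        /* MCS: unlink the reply object the receiver was blocked with, now
         * that the transfer is done; for calls and fault delivery it is
         * re-linked to the sender via reply_push() below. */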
        reply_t *reply = REPLY_PTR(thread_state_get_replyObject(dest->tcbState));
        if (reply) {
            reply_unlink(reply, dest);
        }

        if (do_call ||
            seL4_Fault_ptr_get_seL4_FaultType(&thread->tcbFault) != seL4_Fault_NullFault) {
            if (reply != NULL && (canGrant || canGrantReply)) {
                reply_push(thread, dest, reply, canDonate);
            } else {
                setThreadState(thread, ThreadState_Inactive);
            }
        } else if (canDonate && dest->tcbSchedContext == NULL) {
            schedContext_donate(thread->tcbSchedContext, dest);
        }

        /* blocked threads should have enough budget to get out of the kernel */
        assert(dest->tcbSchedContext == NULL || refill_sufficient(dest->tcbSchedContext, 0));
        assert(dest->tcbSchedContext == NULL || refill_ready(dest->tcbSchedContext));
        setThreadState(dest, ThreadState_Running);
        possibleSwitchTo(dest);
#else
        bool_t replyCanGrant = thread_state_ptr_get_blockingIPCCanGrant(&dest->tcbState);

        setThreadState(dest, ThreadState_Running);
        possibleSwitchTo(dest);

        if (do_call) {
            if (canGrant || canGrantReply) {
                setupCallerCap(thread, dest, replyCanGrant);
            } else {
                setThreadState(thread, ThreadState_Inactive);
            }
        }
#endif
        break;
    }
    }
}

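/* Receive a message from an endpoint. A pending signal on the thread's
 * bound notification is delivered in preference to the endpoint. If a
 * sender is queued (Send state), its message is transferred immediately;
 * otherwise a blocking receiver is appended to the endpoint queue.
 */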
#ifdef CONFIG_KERNEL_MCS
void receiveIPC(tcb_t *thread, cap_t cap, bool_t isBlocking, cap_t replyCap)
#else
void receiveIPC(tcb_t *thread, cap_t cap, bool_t isBlocking)
#endif
{
    endpoint_t *epptr;
    notification_t *ntfnPtr;

    /* Haskell error "receiveIPC: invalid cap" */
    assert(cap_get_capType(cap) == cap_endpoint_cap);

    epptr = EP_PTR(cap_endpoint_cap_get_capEPPtr(cap));

#ifdef CONFIG_KERNEL_MCS
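    /* MCS: a reply cap may accompany the receive. If the supplied reply
     * object is still linked to a different TCB, that reply has not been
     * executed, so the stale IPC is cancelled first. */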
    reply_t *replyPtr = NULL;
    if (cap_get_capType(replyCap) == cap_reply_cap) {
        replyPtr = REPLY_PTR(cap_reply_cap_get_capReplyPtr(replyCap));
        if (unlikely(replyPtr->replyTCB != NULL && replyPtr->replyTCB != thread)) {
            userError("Reply object already has unexecuted reply!");
            cancelIPC(replyPtr->replyTCB);
        }
    }
#endif

    /* Check for anything waiting in the notification */
    ntfnPtr = thread->tcbBoundNotification;
    if (ntfnPtr && notification_ptr_get_state(ntfnPtr) == NtfnState_Active) {
        completeSignal(ntfnPtr, thread);
    } else {
#ifdef CONFIG_KERNEL_MCS
        /* If this is a blocking receive and there was no pending
         * notification, and the thread is running on a scheduling context
         * that came from its bound notification, return that scheduling
         * context so the thread can wait passively on the endpoint for
         * potential SC donations from client threads.
         */
        if (ntfnPtr && isBlocking) {
            maybeReturnSchedContext(ntfnPtr, thread);
        }
#endif
        switch (endpoint_ptr_get_state(epptr)) {
        case EPState_Idle:
        case EPState_Recv: {
            tcb_queue_t queue;

            if (isBlocking) {
                /* Set thread state to BlockedOnReceive */
                thread_state_ptr_set_tsType(&thread->tcbState,
                                            ThreadState_BlockedOnReceive);
                thread_state_ptr_set_blockingObject(
                    &thread->tcbState, EP_REF(epptr));
#ifdef CONFIG_KERNEL_MCS
                thread_state_ptr_set_replyObject(&thread->tcbState, REPLY_REF(replyPtr));
                if (replyPtr) {
                    replyPtr->replyTCB = thread;
                }
#else
                thread_state_ptr_set_blockingIPCCanGrant(
                    &thread->tcbState, cap_endpoint_cap_get_capCanGrant(cap));
#endif
                scheduleTCB(thread);

                /* Place calling thread in endpoint queue */
                queue = ep_ptr_get_queue(epptr);
                queue = tcbEPAppend(thread, queue);
                endpoint_ptr_set_state(epptr, EPState_Recv);
                ep_ptr_set_queue(epptr, queue);
            } else {
                doNBRecvFailedTransfer(thread);
            }
            break;
        }

        case EPState_Send: {
            tcb_queue_t queue;
            tcb_t *sender;
            word_t badge;
            bool_t canGrant;
            bool_t canGrantReply;
            bool_t do_call;

            /* Get the head of the endpoint queue. */
            queue = ep_ptr_get_queue(epptr);
            sender = queue.head;

            /* Haskell error "Send endpoint queue must not be empty" */
            assert(sender);

            /* Dequeue the first TCB */
            queue = tcbEPDequeue(sender, queue);
            ep_ptr_set_queue(epptr, queue);

            if (!queue.head) {
                endpoint_ptr_set_state(epptr, EPState_Idle);
            }

            /* Get sender IPC details */
            badge = thread_state_ptr_get_blockingIPCBadge(&sender->tcbState);
            canGrant =
                thread_state_ptr_get_blockingIPCCanGrant(&sender->tcbState);
            canGrantReply =
                thread_state_ptr_get_blockingIPCCanGrantReply(&sender->tcbState);

            /* Do the transfer */
            doIPCTransfer(sender, epptr, badge,
                          canGrant, thread);

            do_call = thread_state_ptr_get_blockingIPCIsCall(&sender->tcbState);

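            /* For a call (or, on MCS, a faulting sender), set up a reply
             * if the grant rights allow one; otherwise the sender is left
             * inactive. A plain send just resumes the sender. */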
#ifdef CONFIG_KERNEL_MCS
            if (do_call ||
                seL4_Fault_get_seL4_FaultType(sender->tcbFault) != seL4_Fault_NullFault) {
                if ((canGrant || canGrantReply) && replyPtr != NULL) {
                    bool_t canDonate = sender->tcbSchedContext != NULL
                                       && seL4_Fault_get_seL4_FaultType(sender->tcbFault) != seL4_Fault_Timeout;
                    reply_push(sender, thread, replyPtr, canDonate);
                } else {
                    setThreadState(sender, ThreadState_Inactive);
                }
            } else {
                setThreadState(sender, ThreadState_Running);
                possibleSwitchTo(sender);
                assert(sender->tcbSchedContext == NULL || refill_sufficient(sender->tcbSchedContext, 0));
            }
#else
            if (do_call) {
                if (canGrant || canGrantReply) {
                    setupCallerCap(sender, thread, cap_endpoint_cap_get_capCanGrant(cap));
                } else {
                    setThreadState(sender, ThreadState_Inactive);
                }
            } else {
                setThreadState(sender, ThreadState_Running);
                possibleSwitchTo(sender);
            }
#endif
            break;
        }
        }
    }
}

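/* Reply to a thread from the kernel after a failed invocation, writing
 * the syscall error details into its message registers and IPC buffer. */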
void replyFromKernel_error(tcb_t *thread)
{
    word_t len;
    word_t *ipcBuffer;

    ipcBuffer = lookupIPCBuffer(true, thread);
    setRegister(thread, badgeRegister, 0);
    len = setMRs_syscall_error(thread, ipcBuffer);

#ifdef CONFIG_KERNEL_INVOCATION_REPORT_ERROR_IPC
    char *debugBuffer = (char *)(ipcBuffer + DEBUG_MESSAGE_START + 1);
    word_t add = strlcpy(debugBuffer, (char *)current_debug_error.errorMessage,
                         DEBUG_MESSAGE_MAXLEN * sizeof(word_t));

    len += (add / sizeof(word_t)) + 1;
#endif

    setRegister(thread, msgInfoRegister, wordFromMessageInfo(
                    seL4_MessageInfo_new(current_syscall_error.type, 0, 0, len)));
}

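/* Reply to a thread from the kernel with a successful, zero-length message. */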
void replyFromKernel_success_empty(tcb_t *thread)
{
    setRegister(thread, badgeRegister, 0);
    setRegister(thread, msgInfoRegister, wordFromMessageInfo(
                    seL4_MessageInfo_new(0, 0, 0, 0)));
}

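/* Cancel the IPC operation a thread is blocked on, removing it from any
 * endpoint or notification queue and tidying up its reply state. */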
void cancelIPC(tcb_t *tptr)
{
    thread_state_t *state = &tptr->tcbState;

#ifdef CONFIG_KERNEL_MCS
    /* cancelling IPC also cancels any pending fault */
    seL4_Fault_NullFault_ptr_new(&tptr->tcbFault);
#endif

    switch (thread_state_ptr_get_tsType(state)) {
    case ThreadState_BlockedOnSend:
    case ThreadState_BlockedOnReceive: {
        /* blockedIPCCancel state */
        endpoint_t *epptr;
        tcb_queue_t queue;

        epptr = EP_PTR(thread_state_ptr_get_blockingObject(state));

        /* Haskell error "blockedIPCCancel: endpoint must not be idle" */
        assert(endpoint_ptr_get_state(epptr) != EPState_Idle);

        /* Dequeue TCB */
        queue = ep_ptr_get_queue(epptr);
        queue = tcbEPDequeue(tptr, queue);
        ep_ptr_set_queue(epptr, queue);

        if (!queue.head) {
            endpoint_ptr_set_state(epptr, EPState_Idle);
        }

#ifdef CONFIG_KERNEL_MCS
        reply_t *reply = REPLY_PTR(thread_state_get_replyObject(tptr->tcbState));
        if (reply != NULL) {
            reply_unlink(reply, tptr);
        }
#endif
        setThreadState(tptr, ThreadState_Inactive);
        break;
    }

    case ThreadState_BlockedOnNotification:
        cancelSignal(tptr,
                     NTFN_PTR(thread_state_ptr_get_blockingObject(state)));
        break;

    case ThreadState_BlockedOnReply: {
#ifdef CONFIG_KERNEL_MCS
        reply_remove_tcb(tptr);
#else
        cte_t *slot, *callerCap;

        tptr->tcbFault = seL4_Fault_NullFault_new();

        /* Get the reply cap slot */
        slot = TCB_PTR_CTE_PTR(tptr, tcbReply);

        callerCap = CTE_PTR(mdb_node_get_mdbNext(slot->cteMDBNode));
        if (callerCap) {
            /** GHOSTUPD: "(True,
                gs_set_assn cteDeleteOne_'proc (ucast cap_reply_cap))" */
            cteDeleteOne(callerCap);
        }
#endif

        break;
    }
    }
}

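/* Cancel every send and receive blocked on an endpoint: make the endpoint
 * idle, empty its queue and restart the blocked threads. */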
void cancelAllIPC(endpoint_t *epptr)
{
    switch (endpoint_ptr_get_state(epptr)) {
    case EPState_Idle:
        break;

    default: {
        tcb_t *thread = TCB_PTR(endpoint_ptr_get_epQueue_head(epptr));

        /* Make endpoint idle */
        endpoint_ptr_set_state(epptr, EPState_Idle);
        endpoint_ptr_set_epQueue_head(epptr, 0);
        endpoint_ptr_set_epQueue_tail(epptr, 0);

        /* Set all blocked threads to restart */
        for (; thread; thread = thread->tcbEPNext) {
#ifdef CONFIG_KERNEL_MCS
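            /* MCS: drop any reply object link; threads with a pending
             * fault are left inactive for fault handling rather than
             * restarted. */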
            reply_t *reply = REPLY_PTR(thread_state_get_replyObject(thread->tcbState));
            if (reply != NULL) {
                reply_unlink(reply, thread);
            }
            if (seL4_Fault_get_seL4_FaultType(thread->tcbFault) == seL4_Fault_NullFault) {
                setThreadState(thread, ThreadState_Restart);
                possibleSwitchTo(thread);
            } else {
                setThreadState(thread, ThreadState_Inactive);
            }
#else
            setThreadState(thread, ThreadState_Restart);
            SCHED_ENQUEUE(thread);
#endif
        }

        rescheduleRequired();
        break;
    }
    }
}

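/* Cancel all sends blocked on an endpoint with a particular badge, e.g.
 * via the seL4_CNode_CancelBadgedSends invocation. */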
void cancelBadgedSends(endpoint_t *epptr, word_t badge)
{
    switch (endpoint_ptr_get_state(epptr)) {
    case EPState_Idle:
    case EPState_Recv:
        break;

    case EPState_Send: {
        tcb_t *thread, *next;
        tcb_queue_t queue = ep_ptr_get_queue(epptr);

        /* this is a de-optimisation for verification
         * reasons. it allows the contents of the endpoint
         * queue to be ignored during the for loop. */
        endpoint_ptr_set_state(epptr, EPState_Idle);
        endpoint_ptr_set_epQueue_head(epptr, 0);
        endpoint_ptr_set_epQueue_tail(epptr, 0);

        for (thread = queue.head; thread; thread = next) {
            word_t b = thread_state_ptr_get_blockingIPCBadge(
                           &thread->tcbState);
            next = thread->tcbEPNext;
#ifdef CONFIG_KERNEL_MCS
            /* senders do not have reply objects in their state, and we are only cancelling sends */
            assert(REPLY_PTR(thread_state_get_replyObject(thread->tcbState)) == NULL);
            if (b == badge) {
                if (seL4_Fault_get_seL4_FaultType(thread->tcbFault) ==
                    seL4_Fault_NullFault) {
                    setThreadState(thread, ThreadState_Restart);
                    possibleSwitchTo(thread);
                } else {
                    setThreadState(thread, ThreadState_Inactive);
                }
                queue = tcbEPDequeue(thread, queue);
            }
#else
            if (b == badge) {
                setThreadState(thread, ThreadState_Restart);
                SCHED_ENQUEUE(thread);
                queue = tcbEPDequeue(thread, queue);
            }
#endif
        }
        ep_ptr_set_queue(epptr, queue);

        if (queue.head) {
            endpoint_ptr_set_state(epptr, EPState_Send);
        }

        rescheduleRequired();

        break;
    }

    default:
        fail("invalid EP state");
    }
}

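/* MCS only: re-insert a thread into the endpoint queue it is blocked on,
 * e.g. after its priority changes, so its queue position stays correct. */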
#ifdef CONFIG_KERNEL_MCS
void reorderEP(endpoint_t *epptr, tcb_t *thread)
{
    tcb_queue_t queue = ep_ptr_get_queue(epptr);
    queue = tcbEPDequeue(thread, queue);
    queue = tcbEPAppend(thread, queue);
    ep_ptr_set_queue(epptr, queue);
}
#endif