/*
 * Copyright 2014, General Dynamics C4 Systems
 *
 * This software may be distributed and modified according to the terms of
 * the GNU General Public License version 2. Note that NO WARRANTY is provided.
 * See "LICENSE_GPLv2.txt" for details.
 *
 * @TAG(GD_GPL)
 */

#include <types.h>
#include <kernel/thread.h>
#include <kernel/vspace.h>
#include <machine/registerset.h>
#include <model/statedata.h>
#include <object/notification.h>
#include <object/cnode.h>
#include <object/endpoint.h>
#include <object/tcb.h>

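/* Write a TCB queue back into an endpoint's head and tail pointers. */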
static inline void
ep_ptr_set_queue(endpoint_t *epptr, tcb_queue_t queue)
{
    endpoint_ptr_set_epQueue_head(epptr, (word_t)queue.head);
    endpoint_ptr_set_epQueue_tail(epptr, (word_t)queue.end);
}

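/*
 * Send a message from 'thread' to the endpoint 'epptr'.
 *
 * If no receiver is waiting, the sender is queued on the endpoint in
 * ThreadState_BlockedOnSend when 'blocking' is set, and the message is
 * dropped otherwise. If a receiver is waiting, the message is transferred
 * immediately: a call (or a faulting sender) pushes the receiver's reply
 * object when a grant is possible, and a scheduling context may be donated
 * to a passive receiver when 'canDonate' is set.
 */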
void
sendIPC(bool_t blocking, bool_t do_call, word_t badge,
        bool_t canGrant, bool_t canDonate, tcb_t *thread, endpoint_t *epptr)
{
    switch (endpoint_ptr_get_state(epptr)) {
    case EPState_Idle:
    case EPState_Send:
        if (blocking) {
            tcb_queue_t queue;

            /* Set thread state to BlockedOnSend */
            thread_state_ptr_set_tsType(&thread->tcbState,
                                        ThreadState_BlockedOnSend);
            thread_state_ptr_set_blockingObject(
                &thread->tcbState, EP_REF(epptr));
            thread_state_ptr_set_blockingIPCBadge(
                &thread->tcbState, badge);
            thread_state_ptr_set_blockingIPCCanGrant(
                &thread->tcbState, canGrant);
            thread_state_ptr_set_blockingIPCIsCall(
                &thread->tcbState, do_call);

            scheduleTCB(thread);

            /* Place calling thread in endpoint queue */
            queue = ep_ptr_get_queue(epptr);
            queue = tcbEPAppend(thread, queue);
            endpoint_ptr_set_state(epptr, EPState_Send);
            ep_ptr_set_queue(epptr, queue);
        }
        break;

    case EPState_Recv: {
        tcb_queue_t queue;
        tcb_t *dest;

        /* Get the head of the endpoint queue. */
        queue = ep_ptr_get_queue(epptr);
        dest = queue.head;

        /* Haskell error "Receive endpoint queue must not be empty" */
        assert(dest);

        /* Dequeue the first TCB */
        queue = tcbEPDequeue(dest, queue);
        ep_ptr_set_queue(epptr, queue);

        if (!queue.head) {
            endpoint_ptr_set_state(epptr, EPState_Idle);
        }

        /* Do the transfer */
        doIPCTransfer(thread, epptr, badge, canGrant, dest);

        reply_t *reply = REPLY_PTR(thread_state_get_replyObject(dest->tcbState));
        if (reply) {
            reply_unlink(reply);
        }

        if (do_call ||
                seL4_Fault_ptr_get_seL4_FaultType(&thread->tcbFault) != seL4_Fault_NullFault) {
            if (reply != NULL && canGrant) {
                reply_push(thread, dest, reply, canDonate);
            } else {
                /* A call that cannot receive a reply leaves the caller
                 * inactive, mirroring the EPState_Send case in receiveIPC. */
                setThreadState(thread, ThreadState_Inactive);
            }
        } else if (canDonate && dest->tcbSchedContext == NULL) {
            schedContext_donate(thread->tcbSchedContext, dest);
        }

        /* blocked threads should have enough budget to get out of the kernel */
        assert(dest->tcbSchedContext == NULL || refill_sufficient(dest->tcbSchedContext, 0));
        assert(dest->tcbSchedContext == NULL || refill_ready(dest->tcbSchedContext));
        setThreadState(dest, ThreadState_Running);
        possibleSwitchTo(dest);
        break;
    }
    }
}

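/*
 * Receive a message on the endpoint referenced by 'cap', on behalf of
 * 'thread'.
 *
 * A pending signal on the thread's bound notification takes priority over
 * the endpoint. Otherwise, if no sender is queued, the thread either blocks
 * in ThreadState_BlockedOnReceive (recording the reply object from
 * 'replyCap') or, for a non-blocking receive, completes with a failed
 * transfer. If a sender is queued, its message is transferred and the sender
 * is resumed, linked to the reply object, or made inactive, depending on
 * whether it performed a call or raised a fault and whether a grant is
 * possible.
 */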
void
receiveIPC(tcb_t *thread, cap_t cap, bool_t isBlocking, cap_t replyCap)
{
    endpoint_t *epptr;
    notification_t *ntfnPtr;

    /* Haskell error "receiveIPC: invalid cap" */
    assert(cap_get_capType(cap) == cap_endpoint_cap);

    epptr = EP_PTR(cap_endpoint_cap_get_capEPPtr(cap));

    reply_t *replyPtr = NULL;
    if (cap_get_capType(replyCap) == cap_reply_cap) {
        replyPtr = REPLY_PTR(cap_reply_cap_get_capReplyPtr(replyCap));
        if (unlikely(replyPtr->replyTCB != NULL && replyPtr->replyTCB != thread)) {
            userError("Reply object already has unexecuted reply!");
            cancelIPC(replyPtr->replyTCB);
        }
    }

    /* Check for anything waiting in the notification */
    ntfnPtr = thread->tcbBoundNotification;
    if (ntfnPtr && notification_ptr_get_state(ntfnPtr) == NtfnState_Active) {
        completeSignal(ntfnPtr, thread);
    } else {
        switch (endpoint_ptr_get_state(epptr)) {
        case EPState_Idle:
        case EPState_Recv: {
            tcb_queue_t queue;

            if (isBlocking) {
                /* Set thread state to BlockedOnReceive */
                thread_state_ptr_set_tsType(&thread->tcbState,
                                            ThreadState_BlockedOnReceive);
                thread_state_ptr_set_blockingObject(
                    &thread->tcbState, EP_REF(epptr));
                thread_state_ptr_set_replyObject(&thread->tcbState, REPLY_REF(replyPtr));
                if (replyPtr) {
                    replyPtr->replyTCB = thread;
                }

                scheduleTCB(thread);

                /* Place calling thread in endpoint queue */
                queue = ep_ptr_get_queue(epptr);
                queue = tcbEPAppend(thread, queue);
                endpoint_ptr_set_state(epptr, EPState_Recv);
                ep_ptr_set_queue(epptr, queue);
            } else {
                doNBRecvFailedTransfer(thread);
            }
            break;
        }

        case EPState_Send: {
            tcb_queue_t queue;
            tcb_t *sender;
            word_t badge;
            bool_t canGrant;
            bool_t do_call;

            /* Get the head of the endpoint queue. */
            queue = ep_ptr_get_queue(epptr);
            sender = queue.head;

            /* Haskell error "Send endpoint queue must not be empty" */
            assert(sender);

            /* Dequeue the first TCB */
            queue = tcbEPDequeue(sender, queue);
            ep_ptr_set_queue(epptr, queue);

            if (!queue.head) {
                endpoint_ptr_set_state(epptr, EPState_Idle);
            }

            /* Get sender IPC details */
            badge = thread_state_ptr_get_blockingIPCBadge(&sender->tcbState);
            canGrant =
                thread_state_ptr_get_blockingIPCCanGrant(&sender->tcbState);

            /* Do the transfer */
            doIPCTransfer(sender, epptr, badge,
                          canGrant, thread);

            do_call = thread_state_ptr_get_blockingIPCIsCall(&sender->tcbState);

            if (do_call ||
                    seL4_Fault_get_seL4_FaultType(sender->tcbFault) != seL4_Fault_NullFault) {
                if (canGrant && replyPtr != NULL) {
                    reply_push(sender, thread, replyPtr, sender->tcbSchedContext != NULL);
                } else {
                    setThreadState(sender, ThreadState_Inactive);
                }
            } else {
                setThreadState(sender, ThreadState_Running);
                possibleSwitchTo(sender);
                assert(sender->tcbSchedContext == NULL || refill_sufficient(sender->tcbSchedContext, 0));
            }

            break;
        }
        }
    }
}

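/* Report the current syscall error to 'thread' through its message registers. */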
void
replyFromKernel_error(tcb_t *thread)
{
    word_t len;
    word_t *ipcBuffer;

    ipcBuffer = lookupIPCBuffer(true, thread);
    setRegister(thread, badgeRegister, 0);
    len = setMRs_syscall_error(thread, ipcBuffer);
    setRegister(thread, msgInfoRegister, wordFromMessageInfo(
                    seL4_MessageInfo_new(current_syscall_error.type, 0, 0, len)));
}

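/* Send an empty, successful reply from the kernel to 'thread'. */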
void
replyFromKernel_success_empty(tcb_t *thread)
{
    setRegister(thread, badgeRegister, 0);
    setRegister(thread, msgInfoRegister, wordFromMessageInfo(
                    seL4_MessageInfo_new(0, 0, 0, 0)));
}

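/*
 * Cancel whatever IPC operation 'tptr' is currently blocked on: dequeue it
 * from the endpoint or notification it is waiting on and unlink or remove
 * any reply object associated with it.
 */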
void
cancelIPC(tcb_t *tptr)
{
    thread_state_t *state = &tptr->tcbState;

    switch (thread_state_ptr_get_tsType(state)) {
    case ThreadState_BlockedOnSend:
    case ThreadState_BlockedOnReceive: {
        /* blockedIPCCancel state */
        endpoint_t *epptr;
        tcb_queue_t queue;

        epptr = EP_PTR(thread_state_ptr_get_blockingObject(state));

        /* Haskell error "blockedIPCCancel: endpoint must not be idle" */
        assert(endpoint_ptr_get_state(epptr) != EPState_Idle);

        /* Dequeue TCB */
        queue = ep_ptr_get_queue(epptr);
        queue = tcbEPDequeue(tptr, queue);
        ep_ptr_set_queue(epptr, queue);

        if (!queue.head) {
            endpoint_ptr_set_state(epptr, EPState_Idle);
        }

        reply_t *reply = REPLY_PTR(thread_state_get_replyObject(tptr->tcbState));
        if (reply != NULL) {
            reply_unlink(reply);
        }
        setThreadState(tptr, ThreadState_Inactive);
        break;
    }

    case ThreadState_BlockedOnNotification:
        cancelSignal(tptr,
                     NTFN_PTR(thread_state_ptr_get_blockingObject(state)));
        break;

    case ThreadState_BlockedOnReply: {
        seL4_Fault_NullFault_ptr_new(&tptr->tcbFault);
        reply_remove_tcb(tptr);
        break;
    }
    }
}

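/*
 * Cancel all IPC on an endpoint: make the endpoint idle, unlink any reply
 * objects and restart every thread that was queued on it.
 */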
void
cancelAllIPC(endpoint_t *epptr)
{
    switch (endpoint_ptr_get_state(epptr)) {
    case EPState_Idle:
        break;

    default: {
        tcb_t *thread = TCB_PTR(endpoint_ptr_get_epQueue_head(epptr));

        /* Make endpoint idle */
        endpoint_ptr_set_state(epptr, EPState_Idle);
        endpoint_ptr_set_epQueue_head(epptr, 0);
        endpoint_ptr_set_epQueue_tail(epptr, 0);

        /* Set all blocked threads to restart */
        for (; thread; thread = thread->tcbEPNext) {
            reply_t *reply = REPLY_PTR(thread_state_get_replyObject(thread->tcbState));
            if (reply != NULL) {
                reply_unlink(reply);
            }
            setThreadState(thread, ThreadState_Restart);
            possibleSwitchTo(thread);
        }

        rescheduleRequired();
        break;
    }
    }
}

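/*
 * Restart and dequeue every sender blocked on 'epptr' whose badge matches
 * 'badge'; senders with other badges stay queued.
 */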
void
cancelBadgedSends(endpoint_t *epptr, word_t badge)
{
    switch (endpoint_ptr_get_state(epptr)) {
    case EPState_Idle:
    case EPState_Recv:
        break;

    case EPState_Send: {
        tcb_t *thread, *next;
        tcb_queue_t queue = ep_ptr_get_queue(epptr);

        /* this is a de-optimisation for verification
         * reasons. it allows the contents of the endpoint
         * queue to be ignored during the for loop. */
        endpoint_ptr_set_state(epptr, EPState_Idle);
        endpoint_ptr_set_epQueue_head(epptr, 0);
        endpoint_ptr_set_epQueue_tail(epptr, 0);

        for (thread = queue.head; thread; thread = next) {
            word_t b = thread_state_ptr_get_blockingIPCBadge(
                           &thread->tcbState);
            next = thread->tcbEPNext;
            /* senders do not have reply objects in their state, and we are only cancelling sends */
            assert(REPLY_PTR(thread_state_get_replyObject(thread->tcbState)) == NULL);
            if (b == badge) {
                setThreadState(thread, ThreadState_Restart);
                possibleSwitchTo(thread);
                queue = tcbEPDequeue(thread, queue);
            }
        }
        ep_ptr_set_queue(epptr, queue);

        if (queue.head) {
            endpoint_ptr_set_state(epptr, EPState_Send);
        }

        rescheduleRequired();

        break;
    }

    default:
        fail("invalid EP state");
    }
}

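/* Move 'thread' to the end of the endpoint queue it is blocked on. */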
void
reorderEP(endpoint_t *epptr, tcb_t *thread)
{
    tcb_queue_t queue = ep_ptr_get_queue(epptr);
    queue = tcbEPDequeue(thread, queue);
    queue = tcbEPAppend(thread, queue);
    ep_ptr_set_queue(epptr, queue);
}