/*
 * Copyright 2014, General Dynamics C4 Systems
 *
 * SPDX-License-Identifier: GPL-2.0-only
 */

#pragma once

#include <config.h>
#include <api/types.h>
#include <stdint.h>
#include <arch/object/structures_gen.h>
#include <mode/types.h>
#include <sel4/macros.h>
#include <sel4/arch/constants.h>
#include <sel4/sel4_arch/constants.h>
#include <benchmark/benchmark_utilisation_.h>

enum irq_state {
    IRQInactive  = 0,
    IRQSignal    = 1,
    IRQTimer     = 2,
#ifdef ENABLE_SMP_SUPPORT
    IRQIPI       = 3,
#endif
    IRQReserved
};
typedef word_t irq_state_t;

typedef struct dschedule {
    dom_t domain;
    word_t length;
} dschedule_t;

enum asidSizeConstants {
    asidHighBits = seL4_NumASIDPoolsBits,
    asidLowBits = seL4_ASIDPoolIndexBits
};

/* Arch-independent object types */
enum endpoint_state {
    EPState_Idle = 0,
    EPState_Send = 1,
    EPState_Recv = 2
};
typedef word_t endpoint_state_t;

enum notification_state {
    NtfnState_Idle    = 0,
    NtfnState_Waiting = 1,
    NtfnState_Active  = 2
};
typedef word_t notification_state_t;

#define EP_PTR(r) ((endpoint_t *)(r))
#define EP_REF(p) ((word_t)(p))

#define NTFN_PTR(r) ((notification_t *)(r))
#define NTFN_REF(p) ((word_t)(p))

#define CTE_PTR(r) ((cte_t *)(r))
#define CTE_REF(p) ((word_t)(p))

#define CNODE_MIN_BITS 1
#define CNODE_PTR(r) (CTE_PTR(r))
#define CNODE_REF(p) (CTE_REF(p)>>CNODE_MIN_BITS)

// We would like the actual 'tcb' region (the portion that contains the tcb_t) of the TCB
// object to be as large as possible, but it still needs to be aligned. As the TCB object
// contains two sub-objects, the largest we can make either sub-object whilst preserving
// size alignment is half the total size. To halve an object size defined in bits we just
// subtract 1.
//
// A diagram of a TCB kernel object that is created from untyped:
//  _______________________________________
// |     |             |                   |
// |     |             |                   |
// |cte_t|   unused    |       tcb_t       |
// |     |(debug_tcb_t)|                   |
// |_____|_____________|___________________|
// 0     a             b                   c
// a = tcbCNodeEntries * sizeof(cte_t)
// b = BIT(TCB_SIZE_BITS)
// c = BIT(seL4_TCBBits)
//
#define TCB_SIZE_BITS (seL4_TCBBits - 1)

#define TCB_CNODE_SIZE_BITS (TCB_CNODE_RADIX + seL4_SlotBits)
#define TCB_CNODE_RADIX     4
#define TCB_OFFSET          BIT(TCB_SIZE_BITS)

/* Generate a tcb_t or cte_t pointer from a tcb block reference */
#define TCB_PTR(r)       ((tcb_t *)(r))
#define TCB_CTE_PTR(r,i) (((cte_t *)(r))+(i))
#define TCB_REF(p)       ((word_t)(p))

/* Generate a cte_t pointer from a tcb_t pointer */
#define TCB_PTR_CTE_PTR(p,i) \
    (((cte_t *)((word_t)(p)&~MASK(seL4_TCBBits)))+(i))
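
/* Illustrative example (not part of the kernel API): a TCB object is aligned
 * to BIT(seL4_TCBBits) and its cte_t slots sit at the base of the object, so
 * the slots of a thread can be recovered from any pointer into the object by
 * masking off the low seL4_TCBBits bits, e.g.
 *
 *     cte_t *cspaceSlot = TCB_PTR_CTE_PTR(tcb, tcbCTable);
 *
 * where tcb is a tcb_t pointer (which, per the diagram above, points into
 * the upper half of the same object). */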

#define SC_REF(p) ((word_t) (p))
#define SC_PTR(r) ((sched_context_t *) (r))

#define REPLY_REF(p) ((word_t) (p))
#define REPLY_PTR(r) ((reply_t *) (r))

#define WORD_PTR(r) ((word_t *)(r))
#define WORD_REF(p) ((word_t)(p))

#define ZombieType_ZombieTCB        BIT(wordRadix)
#define ZombieType_ZombieCNode(n)   ((n) & MASK(wordRadix))

static inline cap_t CONST Zombie_new(word_t number, word_t type, word_t ptr)
{
    word_t mask;

    if (type == ZombieType_ZombieTCB) {
        mask = MASK(TCB_CNODE_RADIX + 1);
    } else {
        mask = MASK(type + 1);
    }

    return cap_zombie_cap_new((ptr & ~mask) | (number & mask), type);
}

static inline word_t CONST cap_zombie_cap_get_capZombieBits(cap_t cap)
{
    word_t type = cap_zombie_cap_get_capZombieType(cap);
    if (type == ZombieType_ZombieTCB) {
        return TCB_CNODE_RADIX;
    }
    return ZombieType_ZombieCNode(type); /* cnode radix */
}

static inline word_t CONST cap_zombie_cap_get_capZombieNumber(cap_t cap)
{
    word_t radix = cap_zombie_cap_get_capZombieBits(cap);
    return cap_zombie_cap_get_capZombieID(cap) & MASK(radix + 1);
}

static inline word_t CONST cap_zombie_cap_get_capZombiePtr(cap_t cap)
{
    word_t radix = cap_zombie_cap_get_capZombieBits(cap);
    return cap_zombie_cap_get_capZombieID(cap) & ~MASK(radix + 1);
}

static inline cap_t CONST cap_zombie_cap_set_capZombieNumber(cap_t cap, word_t n)
{
    word_t radix = cap_zombie_cap_get_capZombieBits(cap);
    word_t ptr = cap_zombie_cap_get_capZombieID(cap) & ~MASK(radix + 1);
    return cap_zombie_cap_set_capZombieID(cap, ptr | (n & MASK(radix + 1)));
}
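
/* A worked example of the zombie encoding (for illustration): a zombie for a
 * CNode of radix 4 has capZombieType == 4, so capZombieBits is 4 and the low
 * radix + 1 = 5 bits of capZombieID hold the number of remaining slots
 * (0..16 inclusive, which needs 5 bits). The remaining high bits hold the
 * object pointer; since a CNode is aligned to BIT(radix + seL4_SlotBits),
 * the pointer bits never collide with the number field. */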

/* Capability table entry (CTE) */
struct cte {
    cap_t cap;
    mdb_node_t cteMDBNode;
};
typedef struct cte cte_t;

#define nullMDBNode mdb_node_new(0, false, false, 0)

/* Thread state */
enum _thread_state {
    ThreadState_Inactive = 0,
    ThreadState_Running,
    ThreadState_Restart,
    ThreadState_BlockedOnReceive,
    ThreadState_BlockedOnSend,
    ThreadState_BlockedOnReply,
    ThreadState_BlockedOnNotification,
#ifdef CONFIG_VTX
    ThreadState_RunningVM,
#endif
    ThreadState_IdleThreadState
};
typedef word_t _thread_state_t;

/* A TCB CNode and a TCB are always allocated together, and adjacently.
 * The CNode comes first. */
enum tcb_cnode_index {
    /* CSpace root */
    tcbCTable = 0,

    /* VSpace root */
    tcbVTable = 1,

#ifdef CONFIG_KERNEL_MCS
    /* IPC buffer cap slot */
    tcbBuffer = 2,

    /* Fault endpoint slot */
    tcbFaultHandler = 3,

    /* Timeout endpoint slot */
    tcbTimeoutHandler = 4,
#else
    /* Reply cap slot */
    tcbReply = 2,

    /* TCB of most recent IPC sender */
    tcbCaller = 3,

    /* IPC buffer cap slot */
    tcbBuffer = 4,
#endif
    tcbCNodeEntries
};
typedef word_t tcb_cnode_index_t;
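
/* Example (illustrative): combined with TCB_PTR_CTE_PTR above, these indices
 * locate individual slots, e.g. on non-MCS configurations
 *
 *     cte_t *replySlot = TCB_PTR_CTE_PTR(tcb, tcbReply);
 *
 * tcbCNodeEntries is the total slot count, i.e. 'a' in the TCB object
 * diagram above is tcbCNodeEntries * sizeof(cte_t). */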

#include <arch/object/structures.h>

struct user_data {
    word_t words[BIT(seL4_PageBits) / sizeof(word_t)];
};
typedef struct user_data user_data_t;

struct user_data_device {
    word_t words[BIT(seL4_PageBits) / sizeof(word_t)];
};
typedef struct user_data_device user_data_device_t;

static inline word_t CONST wordFromVMRights(vm_rights_t vm_rights)
{
    return (word_t)vm_rights;
}

static inline vm_rights_t CONST vmRightsFromWord(word_t w)
{
    return (vm_rights_t)w;
}

static inline vm_attributes_t CONST vmAttributesFromWord(word_t w)
{
    vm_attributes_t attr;

    attr.words[0] = w;
    return attr;
}

#ifdef CONFIG_KERNEL_MCS
typedef struct sched_context sched_context_t;
typedef struct reply reply_t;
#endif

/* TCB: size >= 18 words + sizeof(arch_tcb_t) + 1 word on MCS (aligned to nearest power of 2) */
struct tcb {
    /* arch specific tcb state (including context) */
    arch_tcb_t tcbArch;

    /* Thread state, 3 words */
    thread_state_t tcbState;

    /* Notification that this TCB is bound to. If this is set, when this TCB waits on
     * any sync endpoint, it may receive a signal from a Notification object.
     * 1 word */
    notification_t *tcbBoundNotification;

    /* Current fault, 2 words */
    seL4_Fault_t tcbFault;

    /* Current lookup failure, 2 words */
    lookup_fault_t tcbLookupFailure;

    /* Domain, 1 byte (padded to 1 word) */
    dom_t tcbDomain;

    /* Maximum controlled priority, 1 byte (padded to 1 word) */
    prio_t tcbMCP;

    /* Priority, 1 byte (padded to 1 word) */
    prio_t tcbPriority;

#ifdef CONFIG_KERNEL_MCS
    /* scheduling context that this TCB is running on; if it is NULL the TCB cannot
     * be in the scheduler queues, 1 word */
    sched_context_t *tcbSchedContext;

    /* scheduling context that this TCB yielded to */
    sched_context_t *tcbYieldTo;
#else
    /* Timeslice remaining, 1 word */
    word_t tcbTimeSlice;

    /* Capability pointer to thread fault handler, 1 word */
    cptr_t tcbFaultHandler;
#endif

    /* userland virtual address of thread IPC buffer, 1 word */
    word_t tcbIPCBuffer;

#ifdef ENABLE_SMP_SUPPORT
    /* cpu ID this thread is running on, 1 word */
    word_t tcbAffinity;
#endif /* ENABLE_SMP_SUPPORT */

    /* Previous and next pointers for scheduler queues, 2 words */
    struct tcb *tcbSchedNext;
    struct tcb *tcbSchedPrev;
    /* Previous and next pointers for endpoint and notification queues, 2 words */
    struct tcb *tcbEPNext;
    struct tcb *tcbEPPrev;

#ifdef CONFIG_KERNEL_MCS
    /* if the TCB is in a call, pointer to the reply object, 1 word */
    reply_t *tcbReply;
#endif
#ifdef CONFIG_BENCHMARK_TRACK_UTILISATION
    /* 16 bytes (12 bytes aarch32) */
    benchmark_util_t benchmark;
#endif
};
typedef struct tcb tcb_t;

#ifdef CONFIG_DEBUG_BUILD
/* This debug_tcb object is inserted into the 'unused' region of a TCB object
   for debug build configurations. */
struct debug_tcb {
    /* Pointers for the list of all TCBs that is maintained
     * when CONFIG_DEBUG_BUILD is enabled, 2 words */
    struct tcb *tcbDebugNext;
    struct tcb *tcbDebugPrev;
    /* Use any remaining space for a thread name */
    char tcbName[];
};
typedef struct debug_tcb debug_tcb_t;

#define TCB_PTR_DEBUG_PTR(p) ((debug_tcb_t *)TCB_PTR_CTE_PTR(p,tcbArchCNodeEntries))
#endif /* CONFIG_DEBUG_BUILD */
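
/* Sketch of the debug layout (illustrative): the debug_tcb_t begins right
 * after the architecture-defined number of CTE slots (tcbArchCNodeEntries),
 * i.e. at byte offset tcbArchCNodeEntries * sizeof(cte_t) from the object
 * base, e.g.
 *
 *     debug_tcb_t *debug = TCB_PTR_DEBUG_PTR(tcb);
 *
 * where the flexible tcbName array can use the rest of the unused region,
 * up to offset BIT(TCB_SIZE_BITS). */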

#ifdef CONFIG_KERNEL_MCS
typedef struct refill {
    /* Absolute timestamp from when this refill can be used */
    ticks_t rTime;
    /* Amount of ticks that can be used from this refill */
    ticks_t rAmount;
} refill_t;

#define MIN_REFILLS 2u
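
/* Illustrative example (a sketch, not kernel code): a thread with its full
 * budget available now is represented by a single active refill,
 *
 *     refill_t r = { .rTime = now, .rAmount = budget };
 *
 * where now and budget are hypothetical values. MIN_REFILLS is 2 because,
 * roughly, consuming only part of a refill splits it: the used portion is
 * rescheduled one period later while the remainder stays available, so at
 * least two entries must always fit. */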

struct sched_context {
    /* period for this sc -- controls rate at which budget is replenished */
    ticks_t scPeriod;

    /* amount of ticks this sc has been scheduled for since seL4_SchedContext_Consumed
     * was last called or a timeout exception fired */
    ticks_t scConsumed;

    /* core this scheduling context provides time for - 0 if uniprocessor */
    word_t scCore;

    /* thread that this scheduling context is bound to */
    tcb_t *scTcb;

    /* if this is not NULL, it points to the last reply object that was generated
     * when the scheduling context was passed over a Call */
    reply_t *scReply;

    /* notification this scheduling context is bound to
     * (scTcb and scNotification cannot be set at the same time) */
    notification_t *scNotification;

    /* data word that is sent with timeout faults that occur on this scheduling context */
    word_t scBadge;

    /* thread that yielded to this scheduling context */
    tcb_t *scYieldFrom;

    /* Number of refills this sc tracks */
    word_t scRefillMax;
    /* Index of the head of the refill circular buffer */
    word_t scRefillHead;
    /* Index of the tail of the refill circular buffer */
    word_t scRefillTail;
};
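
/* The refill array lives in the same kernel object, directly after the
 * sched_context structure; the sc_core_size_sane assert below guarantees
 * room for at least MIN_REFILLS entries. A hypothetical accessor (for
 * illustration only) could be:
 *
 *     static inline refill_t *refill_index(sched_context_t *sc, word_t i)
 *     {
 *         return ((refill_t *)(SC_REF(sc) + sizeof(sched_context_t))) + i;
 *     }
 *
 * with scRefillHead and scRefillTail indexing this buffer as a circular
 * queue of capacity scRefillMax. */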

struct reply {
    /* TCB pointed to by this reply object. This pointer reflects two possible relations,
     * depending on the thread state:
     *
     * ThreadState_BlockedOnReply: this TCB is the caller that is blocked on this reply object,
     * ThreadState_BlockedOnReceive: this TCB is the callee blocked on an endpoint with this
     * reply object.
     *
     * The back pointer for this TCB is stored in the thread state. */
    tcb_t *replyTCB;

    /* 0 if this is the start of the call chain, otherwise points to the
     * previous reply object in the call chain */
    call_stack_t replyPrev;

    /* Either a scheduling context if this reply object is the head of the call chain
     * (the last caller before the server) or another reply object. 0 if no scheduling
     * context was passed along the call chain */
    call_stack_t replyNext;
};
#endif

/* Ensure object sizes are sane */
compile_assert(cte_size_sane, sizeof(cte_t) <= BIT(seL4_SlotBits))
compile_assert(tcb_cte_size_sane, TCB_CNODE_SIZE_BITS <= TCB_SIZE_BITS)
compile_assert(tcb_size_sane,
               BIT(TCB_SIZE_BITS) >= sizeof(tcb_t))
compile_assert(tcb_size_not_excessive,
               BIT(TCB_SIZE_BITS - 1) < sizeof(tcb_t))
compile_assert(ep_size_sane, sizeof(endpoint_t) <= BIT(seL4_EndpointBits))
compile_assert(notification_size_sane, sizeof(notification_t) <= BIT(seL4_NotificationBits))

/* Check the IPC buffer is the right size */
compile_assert(ipc_buf_size_sane, sizeof(seL4_IPCBuffer) == BIT(seL4_IPCBufferSizeBits))
#ifdef CONFIG_KERNEL_MCS
compile_assert(sc_core_size_sane, (sizeof(sched_context_t) + MIN_REFILLS * sizeof(refill_t) <=
                                   seL4_CoreSchedContextBytes))
compile_assert(reply_size_sane, sizeof(reply_t) <= BIT(seL4_ReplyBits))
compile_assert(refill_size_sane, (sizeof(refill_t) == seL4_RefillSizeBytes))
#endif

/* helper functions */

static inline word_t CONST isArchCap(cap_t cap)
{
    return (cap_get_capType(cap) % 2);
}
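
/* This works because of the cap type tag encoding in the generated bitfield
 * code: architecture-specific cap types get odd tag values and generic cap
 * types even ones, so the low bit of the type distinguishes them. For
 * example (illustrative), cap_endpoint_cap has an even tag, so isArchCap()
 * returns 0 for an endpoint cap and 1 for any arch cap such as a frame cap. */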

static inline word_t CONST cap_get_capSizeBits(cap_t cap)
{
    cap_tag_t ctag;

    ctag = cap_get_capType(cap);

    switch (ctag) {
    case cap_untyped_cap:
        return cap_untyped_cap_get_capBlockSize(cap);

    case cap_endpoint_cap:
        return seL4_EndpointBits;

    case cap_notification_cap:
        return seL4_NotificationBits;

    case cap_cnode_cap:
        return cap_cnode_cap_get_capCNodeRadix(cap) + seL4_SlotBits;

    case cap_thread_cap:
        return seL4_TCBBits;

    case cap_zombie_cap: {
        word_t type = cap_zombie_cap_get_capZombieType(cap);
        if (type == ZombieType_ZombieTCB) {
            return seL4_TCBBits;
        }
        return ZombieType_ZombieCNode(type) + seL4_SlotBits;
    }

    case cap_null_cap:
        return 0;

    case cap_domain_cap:
        return 0;

    case cap_reply_cap:
#ifdef CONFIG_KERNEL_MCS
        return seL4_ReplyBits;
#else
        return 0;
#endif

    case cap_irq_control_cap:
#ifdef CONFIG_KERNEL_MCS
    case cap_sched_control_cap:
#endif
        return 0;

    case cap_irq_handler_cap:
        return 0;

#ifdef CONFIG_KERNEL_MCS
    case cap_sched_context_cap:
        return cap_sched_context_cap_get_capSCSizeBits(cap);
#endif

    default:
        return cap_get_archCapSizeBits(cap);
    }
}
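
/* Example (illustrative): for physical caps the object footprint in bytes is
 * BIT(cap_get_capSizeBits(cap)); e.g. a CNode cap of radix 4 covers
 * BIT(4 + seL4_SlotBits) bytes. The cases returning 0 are caps with no
 * backing object size in this sense (see cap_get_capIsPhysical below). */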

/* Returns whether or not this capability has memory associated with it.
 * Referring to this as 'being physical' is to match up with the Haskell
 * and abstract specifications. */
static inline bool_t CONST cap_get_capIsPhysical(cap_t cap)
{
    cap_tag_t ctag;

    ctag = cap_get_capType(cap);

    switch (ctag) {
    case cap_untyped_cap:
        return true;

    case cap_endpoint_cap:
        return true;

    case cap_notification_cap:
        return true;

    case cap_cnode_cap:
        return true;

    case cap_thread_cap:
#ifdef CONFIG_KERNEL_MCS
    case cap_sched_context_cap:
#endif
        return true;

    case cap_zombie_cap:
        return true;

    case cap_domain_cap:
        return false;

    case cap_reply_cap:
#ifdef CONFIG_KERNEL_MCS
        return true;
#else
        return false;
#endif

    case cap_irq_control_cap:
#ifdef CONFIG_KERNEL_MCS
    case cap_sched_control_cap:
#endif
        return false;

    case cap_irq_handler_cap:
        return false;

    default:
        return cap_get_archCapIsPhysical(cap);
    }
}

static inline void *CONST cap_get_capPtr(cap_t cap)
{
    cap_tag_t ctag;

    ctag = cap_get_capType(cap);

    switch (ctag) {
    case cap_untyped_cap:
        return WORD_PTR(cap_untyped_cap_get_capPtr(cap));

    case cap_endpoint_cap:
        return EP_PTR(cap_endpoint_cap_get_capEPPtr(cap));

    case cap_notification_cap:
        return NTFN_PTR(cap_notification_cap_get_capNtfnPtr(cap));

    case cap_cnode_cap:
        return CTE_PTR(cap_cnode_cap_get_capCNodePtr(cap));

    case cap_thread_cap:
        return TCB_PTR_CTE_PTR(cap_thread_cap_get_capTCBPtr(cap), 0);

    case cap_zombie_cap:
        return CTE_PTR(cap_zombie_cap_get_capZombiePtr(cap));

    case cap_domain_cap:
        return NULL;

    case cap_reply_cap:
#ifdef CONFIG_KERNEL_MCS
        return REPLY_PTR(cap_reply_cap_get_capReplyPtr(cap));
#else
        return NULL;
#endif

    case cap_irq_control_cap:
#ifdef CONFIG_KERNEL_MCS
    case cap_sched_control_cap:
#endif
        return NULL;

    case cap_irq_handler_cap:
        return NULL;

#ifdef CONFIG_KERNEL_MCS
    case cap_sched_context_cap:
        return SC_PTR(cap_sched_context_cap_get_capSCPtr(cap));
#endif

    default:
        return cap_get_archCapPtr(cap);
    }
}
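
/* Together, cap_get_capPtr and cap_get_capSizeBits describe the memory
 * region a physical cap covers. An illustrative containment check (a
 * sketch, not kernel code; addr is a hypothetical address):
 *
 *     word_t base = (word_t)cap_get_capPtr(cap);
 *     bool_t contains = addr >= base &&
 *                       addr - base < BIT(cap_get_capSizeBits(cap));
 */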

static inline bool_t CONST isCapRevocable(cap_t derivedCap, cap_t srcCap)
{
    if (isArchCap(derivedCap)) {
        return Arch_isCapRevocable(derivedCap, srcCap);
    }
    switch (cap_get_capType(derivedCap)) {
    case cap_endpoint_cap:
        return (cap_endpoint_cap_get_capEPBadge(derivedCap) !=
                cap_endpoint_cap_get_capEPBadge(srcCap));

    case cap_notification_cap:
        return (cap_notification_cap_get_capNtfnBadge(derivedCap) !=
                cap_notification_cap_get_capNtfnBadge(srcCap));

    case cap_irq_handler_cap:
        return (cap_get_capType(srcCap) ==
                cap_irq_control_cap);

    case cap_untyped_cap:
        return true;

    default:
        return false;
    }
}
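
/* Example (illustrative): minting a badged endpoint cap from an unbadged
 * original changes capEPBadge, so the derived cap is considered revocable
 * and, roughly, starts a new revocation subtree in the MDB; copying it
 * again with the same badge does not. */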