1/**
2 * \file
3 * \brief Threads implementation.
4 */
5
6/*
7 * Copyright (c) 2007, 2008, 2009, 2010, 2011, 2012, ETH Zurich.
8 * All rights reserved.
9 *
10 * This file is distributed under the terms in the attached LICENSE file.
11 * If you do not find this file, copies can be found by writing to:
12 * ETH Zurich D-INFK, Haldeneggsteig 4, CH-8092 Zurich. Attn: Systems Group.
13 */
14
15#include <stdlib.h>
16#include <stdio.h>
17#include <string.h>
18#include <barrelfish/barrelfish.h>
19#include <barrelfish/dispatch.h>
20#include <barrelfish/dispatcher_arch.h>
21#include <barrelfish/debug.h>
22#include <barrelfish/slab.h>
23#include <barrelfish/caddr.h>
24#include <barrelfish/curdispatcher_arch.h>
25#include <barrelfish/vspace_mmu_aware.h>
26#include <barrelfish_kpi/cpu_arch.h>
27#include <barrelfish_kpi/domain_params.h>
28#include <arch/registers.h>
29#include <trace/trace.h>
30
31#include <trace_definitions/trace_defs.h>
32
33#include "arch/threads.h"
34#include "threads_priv.h"
35#include "init.h"
36
37#if defined(__x86_64__)
38#  include "arch/ldt.h"
39#endif
40
41
42/// Maximum number of threads in a domain, used to size VM region for thread structures
43// there is no point having MAX_THREADS > LDT_NENTRIES on x86 (see ldt.c)
44#define MAX_THREADS 256
45
46/// Static stack and storage for a bootstrap/cleanup thread
47// XXX: 16-byte aligned for x86-64
48static uintptr_t staticstack[THREADS_DEFAULT_STACK_BYTES / sizeof(uintptr_t)]
49__attribute__((aligned(STACK_ALIGNMENT)));
50
51static struct thread staticthread __attribute__((aligned(THREAD_ALIGNMENT))) = {
52    .stack = staticstack,
53    .stack_top = (char *)staticstack + sizeof(staticstack)
54};
55static struct thread_mutex staticthread_lock = THREAD_MUTEX_INITIALIZER;
56
57/// Storage metadata for thread structures (and TLS data)
58static struct slab_allocator thread_slabs;
59static struct vspace_mmu_aware thread_slabs_vm;
60
61// XXX: mutex and spinlock protecting thread slabs in spanned domains
62/* This ought to be just a mutex. However, thread_create() is called on the
63 * inter-disp message handler thread, and if it blocks in a mutex, there is no
64 * way to wake it up and we will deadlock. This is a quick-fix workaround:
65 *   The spinlock protects the data structure
 * The mutex avoids unnecessary spinning (it is acquired first when safe)
67 */
68static spinlock_t thread_slabs_spinlock;
69static struct thread_mutex thread_slabs_mutex = THREAD_MUTEX_INITIALIZER;
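
/* Illustrative sketch (not compiled) of the two access patterns implied by the
 * comment above: code that may safely block takes the mutex first and then the
 * spinlock, while code that must never block (e.g. thread_create() running on
 * the inter-disp handler thread) takes only the spinlock. The helper names
 * below are hypothetical. */
#if 0
static void *thread_slabs_alloc_nonblocking(void)
{
    // must not block: spinlock only
    acquire_spinlock(&thread_slabs_spinlock);
    void *space = slab_alloc(&thread_slabs);
    release_spinlock(&thread_slabs_spinlock);
    return space;
}

static void thread_slabs_free_blocking(void *space)
{
    // safe to block: mutex first (avoids spinning), then spinlock
    thread_mutex_lock(&thread_slabs_mutex);
    acquire_spinlock(&thread_slabs_spinlock);
    slab_free(&thread_slabs, space);
    release_spinlock(&thread_slabs_spinlock);
    thread_mutex_unlock(&thread_slabs_mutex);
}
#endif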
70
71/// Base and size of the original ("pristine") thread-local storage init data
72static void *tls_block_init_base;
73static size_t tls_block_init_len;
74static size_t tls_block_total_len;
75
76/// Warning already issued about RSP usage.  (Prevent repeated warnings
77/// from the same domain -- e.g., when using THC whose stacks appear
78/// invalid here).
__attribute__((unused)) static bool stack_warned = false;
80
81/// Wrapper function for most threads, runs given function then deletes itself
82static void thread_entry(thread_func_t start_func, void *start_data)
83{
84    assert((lvaddr_t)start_func >= BASE_PAGE_SIZE);
85    int retval = start_func(start_data);
86    thread_exit(retval);
87    assert(!"thread_exit returned");
88}
89
/// Counter for assigning thread IDs
91static uintptr_t threadid = 0;
92
93#ifndef NDEBUG
94/// Debugging assertions on thread queues
95static void check_queue(struct thread *queue)
96{
97    if (queue == NULL) {
98        return;
99    }
100    struct thread *q = queue;
101    int i = 0;
102
103    do {
104        assert_disabled(q != NULL);
105
106        // check for NULL next and prev pointers
107        assert_disabled((lvaddr_t)q->next > BASE_PAGE_SIZE);
108        assert_disabled((lvaddr_t)q->prev > BASE_PAGE_SIZE);
109
110        // check that next and prev pointers are sane
111        assert_disabled(q->next->prev == q);
112        assert_disabled(q->prev->next == q);
113
114        // advance to next elem
115        q = q->next;
116        i++;
117        assert_disabled(i < MAX_THREADS);
118    } while (q != queue);
119}
120#else /* NDEBUG version */
121static inline void check_queue(struct thread *queue) {}
122#endif
123
124/**
125 * \brief Enqueue a thread in the given queue
126 *
127 * For safety, should only happen while disabled.
128 */
129void thread_enqueue(struct thread *thread, struct thread **queue)
130{
131    assert_disabled(thread != NULL);
132    assert_disabled(queue != NULL);
133    check_queue(*queue);
134    if (*queue == NULL) {
135        *queue = thread->prev = thread->next = thread;
136    } else {
137        assert_disabled((*queue)->prev != NULL);
138        thread->prev = (*queue)->prev;
139        thread->next = *queue;
140        (*queue)->prev = thread;
141        assert_disabled(thread->prev != NULL);
142        thread->prev->next = thread;
143    }
144
145    check_queue(*queue);
146}
147
148/**
149 * \brief Dequeue the first thread on the given queue
150 *
151 * For safety, should only happen while disabled.
152 *
153 * \returns Pointer to thread that was dequeued
154 */
155struct thread *thread_dequeue(struct thread **queue)
156{
157    assert_disabled(queue != NULL);
158    struct thread *thread = *queue;
159    assert_disabled(thread != NULL);
160    check_queue(thread);
161    if (thread->prev == thread) {
162        assert_disabled(thread->next == thread);
163        *queue = NULL;
164    } else {
165        thread->prev->next = thread->next;
166        thread->next->prev = thread->prev;
167        *queue = thread->next;
168    }
169    check_queue(*queue);
170#ifndef NDEBUG
171    thread->prev = thread->next = NULL;
172#endif
173    return thread;
174}
175
176/**
177 * \brief Remove a specific thread from a queue
178 *
179 * Does not check that the thread is in the given queue, which it must be.
180 * For safety, should only happen while disabled.
181 */
182void thread_remove_from_queue(struct thread **queue, struct thread *thread)
183{
184    assert_disabled(queue != NULL);
185    assert_disabled(thread != NULL);
186    check_queue(*queue);
187    if (thread->prev == thread) {
188        assert_disabled(thread->next == thread);
189        assert_disabled(*queue == thread);
190        *queue = NULL;
191    } else {
192        thread->prev->next = thread->next;
193        thread->next->prev = thread->prev;
194        if (*queue == thread) {
195            *queue = thread->next;
196        }
197    }
198    check_queue(*queue);
199#ifndef NDEBUG
200    thread->prev = thread->next = NULL;
201#endif
202}
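
/* Minimal usage sketch (not compiled) for the queue primitives above: they
 * maintain a circular doubly-linked list whose head pointer is NULL when the
 * queue is empty, and they may only be called while the dispatcher is
 * disabled. The queue variable and helper below are hypothetical. */
#if 0
static struct thread *example_queue;

static void example_rotate_queue_disabled(void)
{
    // caller must be disabled; moves the head thread to the tail
    if (example_queue != NULL) {
        struct thread *t = thread_dequeue(&example_queue);
        thread_enqueue(t, &example_queue);
    }
}
#endif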
203
204/// Refill backing storage for thread region
205static errval_t refill_thread_slabs(struct slab_allocator *slabs)
206{
207    assert(slabs == &thread_slabs);
208
209    size_t size;
210    void *buf;
211    errval_t err;
212
213    size_t blocksize = sizeof(struct thread) + tls_block_total_len;
214    err = vspace_mmu_aware_map(&thread_slabs_vm, blocksize, &buf, &size);
215    if (err_is_fail(err)) {
216        return err_push(err, LIB_ERR_VSPACE_MMU_AWARE_MAP);
217    }
218
219    slab_grow(slabs, buf, size);
220
221    return SYS_ERR_OK;
222}
223
224/// Initialise the state of a new thread structure
225static void thread_init(dispatcher_handle_t disp, struct thread *newthread)
226{
227    newthread->self = newthread;
228#ifndef NDEBUG
229    newthread->next = newthread->prev = NULL;
230#endif
231    newthread->tls_dtv = NULL;
232    newthread->disp = disp;
233    newthread->coreid = get_dispatcher_generic(disp)->core_id;
234    newthread->userptr = NULL;
235    memset(newthread->userptrs, 0, sizeof(newthread->userptrs));
236    newthread->yield_epoch = 0;
237    newthread->wakeup_reason = NULL;
238    newthread->return_value = 0;
239    thread_cond_init(&newthread->exit_condition);
240    thread_mutex_init(&newthread->exit_lock);
241    newthread->state = THREAD_STATE_RUNNABLE;
242    newthread->detached = false;
243    newthread->joining = false;
244    newthread->in_exception = false;
245    newthread->paused = false;
246    newthread->slab = NULL;
247    newthread->token = 0;
248    newthread->token_number = 1;
249
250    newthread->rpc_in_progress = false;
251    newthread->async_error = SYS_ERR_OK;
252    newthread->local_trigger = NULL;
253}
254
255/**
256 * \brief Returns false if the stack pointer is out of bounds.
257 */
258static bool thread_check_stack_bounds(struct thread *thread,
259                                      arch_registers_state_t *archregs) {
260    lvaddr_t sp = (lvaddr_t) registers_get_sp(archregs);
    return sp > (lvaddr_t)thread->stack &&
           sp <= (lvaddr_t)thread->stack_top;
263}
264
265/**
266 * \brief Schedule and run the next active thread, or yield the dispatcher.
267 *
268 * This may only be called from the dispatcher (on its stack and while
269 * disabled!).
270 *
 * \param handle Dispatcher handle
272 */
273void thread_run_disabled(dispatcher_handle_t handle)
274{
275    struct dispatcher_generic *disp_gen = get_dispatcher_generic(handle);
276    struct dispatcher_shared_generic *disp =
277        get_dispatcher_shared_generic(handle);
278    arch_registers_state_t *enabled_area =
279        dispatcher_get_enabled_save_area(handle);
280
281    if (disp_gen->current != NULL) {
282        assert_disabled(disp_gen->runq != NULL);
283
284        // check stack bounds
285        warn_disabled(&stack_warned,
286                      thread_check_stack_bounds(disp_gen->current, enabled_area));
287
288        struct thread *next = disp_gen->current->next;
289        assert_disabled(next != NULL);
290        if (next != disp_gen->current) {
291            // save previous thread's state
292            arch_registers_state_t *cur_regs = &disp_gen->current->regs;
293            memcpy(cur_regs, enabled_area, sizeof(arch_registers_state_t));
294            disp_gen->current = next;
295            disp_resume(handle, &next->regs);
296        } else {
297            // same thread as before
298            disp_resume(handle, enabled_area);
299        }
300    } else if (disp_gen->runq != NULL) {
301        disp_gen->current = disp_gen->runq;
302        disp->haswork = true;
303        disp_resume(handle, &disp_gen->runq->regs);
304    } else {
305        // kernel gave us the CPU when we have nothing to do. block!
306        disp->haswork = havework_disabled(handle);
307        disp_gen->current = NULL;
308        disp_yield_disabled(handle);
309    }
310}
311
312/** Free all heap/slab-allocated state associated with a thread */
313static void free_thread(struct thread *thread)
314{
315#if defined(__x86_64__) // XXX: gungy segment selector stuff
316    assert(thread->thread_seg_selector != 0);
317    uint16_t fs;
318    __asm("mov %%fs, %0" : "=r" (fs));
319    if (thread->thread_seg_selector == fs) {
320        assert(thread->disp == curdispatcher());
321        struct dispatcher_x86_64 *disp_priv = get_dispatcher_x86_64(thread->disp);
322        // we're freeing the current thread; make sure we reload a valid segment
323        // selector so that curdispatcher() keeps working!
324        __asm volatile("mov %%ax, %%fs"
325                       : /* No outputs */
326                       : "a" (disp_priv->disp_seg_selector));
327    }
328    ldt_free_segment(thread->thread_seg_selector);
329#endif
330
331    free(thread->stack);
332    if (thread->tls_dtv != NULL) {
333        free(thread->tls_dtv);
334    }
335
336    thread_mutex_lock(&thread_slabs_mutex);
337    acquire_spinlock(&thread_slabs_spinlock);
338    slab_free(&thread_slabs, thread->slab); // frees thread itself
339    release_spinlock(&thread_slabs_spinlock);
340    thread_mutex_unlock(&thread_slabs_mutex);
341}
342
343#define ALIGN_PTR(ptr, alignment) ((((uintptr_t)(ptr)) + (alignment) - 1) & ~((alignment) - 1))
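// Worked example: ALIGN_PTR(0x1001, 16) == 0x1010 and ALIGN_PTR(0x1000, 16) == 0x1000;
// the mask trick assumes that alignment is a power of two.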
344
345/**
346 * \brief Creates a new thread that will not be runnable
347 *
348 * \param start_func Function to run on the new thread
349 * \param arg Argument to pass to function
350 * \param stacksize Size of stack, in bytes
351 *
352 * \returns Thread pointer on success, NULL on failure
353 */
354struct thread *thread_create_unrunnable(thread_func_t start_func, void *arg,
355                                        size_t stacksize)
356{
357    // allocate stack
358    assert((stacksize % sizeof(uintptr_t)) == 0);
359    void *stack = malloc(stacksize);
360    if (stack == NULL) {
361        return NULL;
362    }
363
364    // allocate space for TCB + initial TLS data
365    // no mutex as it may deadlock: see comment for thread_slabs_spinlock
366    // thread_mutex_lock(&thread_slabs_mutex);
367    acquire_spinlock(&thread_slabs_spinlock);
368    void *space = slab_alloc(&thread_slabs);
369    release_spinlock(&thread_slabs_spinlock);
370    // thread_mutex_unlock(&thread_slabs_mutex);
371    if (space == NULL) {
372        free(stack);
373        return NULL;
374    }
375
376    // split space into TLS data followed by TCB
377    // XXX: this layout is specific to the x86 ABIs! once other (saner)
378    // architectures support TLS, we'll need to break out the logic.
379    void *tls_data = space;
380    struct thread *newthread = (void *)ALIGN_PTR((uintptr_t)space + tls_block_total_len, THREAD_ALIGNMENT);
381
382    // init thread
383    thread_init(curdispatcher(), newthread);
384    newthread->slab = space;
385
386    if (tls_block_total_len > 0) {
387        // populate initial TLS data from pristine copy
388        assert(tls_block_init_len <= tls_block_total_len);
389        memcpy(tls_data, tls_block_init_base, tls_block_init_len);
390
391        // zero-fill remainder
392        memset((char *)tls_data + tls_block_init_len, 0,
393               tls_block_total_len - tls_block_init_len);
394
395        // create a TLS thread vector
396        struct tls_dtv *dtv = malloc(sizeof(struct tls_dtv) + 1 * sizeof(void *));
397        assert(dtv != NULL);
398
399        dtv->gen = 0;
400        dtv->dtv[0] = tls_data;
401        newthread->tls_dtv = dtv;
402    }
403
404    // FIXME: make arch-specific
405#if defined(__x86_64__) || defined(__k1om__)
406    // create segment for TCB
407    errval_t err = ldt_alloc_segment(newthread, &newthread->thread_seg_selector);
408    if (err_is_fail(err)) {
409        DEBUG_ERR(err, "error allocating LDT segment for new thread");
410        free_thread(newthread);
411        free(stack);
412        return NULL;
413    }
414#endif
415
416    // init stack
417    newthread->stack = stack;
418    newthread->stack_top = (char *)stack + stacksize;
419
420    // waste space for alignment, if malloc gave us an unaligned stack
421    newthread->stack_top = (char *)newthread->stack_top
422        - (lvaddr_t)newthread->stack_top % STACK_ALIGNMENT;
423
424    // set thread's ID
425    newthread->id = threadid++;
426
427    // init registers
428    registers_set_initial(&newthread->regs, newthread, (lvaddr_t)thread_entry,
429                          (lvaddr_t)newthread->stack_top,
430                          (lvaddr_t)start_func, (lvaddr_t)arg, 0, 0);
431
432    return newthread;
433}
434
435/**
436 * \brief Creates a new thread, and makes it runnable
437 *
438 * \param start_func Function to run on the new thread
439 * \param arg Argument to pass to function
440 * \param stacksize Size of stack, in bytes
441 *
442 * \returns Thread pointer on success, NULL on failure
443 */
444struct thread *thread_create_varstack(thread_func_t start_func, void *arg,
445                                      size_t stacksize)
446{
447    struct thread *newthread = thread_create_unrunnable(start_func, arg, stacksize);
448    if (newthread) {
449        // enqueue on runq
450        dispatcher_handle_t handle = disp_disable();
451        struct dispatcher_generic *disp_gen = get_dispatcher_generic(handle);
452        newthread->disp = handle;
453        thread_enqueue(newthread, &disp_gen->runq);
454        disp_enable(handle);
455    }
456    return newthread;
457}
458
459/**
460 * \brief Creates a new thread, and makes it runnable
461 *
462 * \param start_func Function to run on the new thread
463 * \param arg Argument to pass to function
464 *
465 * \returns Thread pointer on success, NULL on failure
466 */
467struct thread *thread_create(thread_func_t start_func, void *arg)
468{
469    return thread_create_varstack(start_func, arg, THREADS_DEFAULT_STACK_BYTES);
470}
471
472/**
473 * \brief Wait for termination of another thread
474 *
475 * \param thread        Pointer to thread to wait for
476 * \param retval        Pointer to variable to hold return value of thread, or NULL
477 *
478 * \returns SYS_ERR_OK on success, error code on error.
479 */
480errval_t thread_join(struct thread *thread, int *retval)
481{
482    assert(thread != NULL);
483    // this function should only be called for threads on same core
484    assert(thread->coreid == disp_get_core_id());
485
486    thread_mutex_lock(&thread->exit_lock);
487    if(thread->detached) {
488        // Thread is detached and thus not joinable
489        thread_mutex_unlock(&thread->exit_lock);
490        return LIB_ERR_THREAD_JOIN_DETACHED;
491    }
492
493    if(thread->joining) {
        // Someone else is already joining; that's an error
495        thread_mutex_unlock(&thread->exit_lock);
496        return LIB_ERR_THREAD_JOIN;
497    }
498
499    thread->joining = true;
500    if(thread->state != THREAD_STATE_EXITED) { // Possibly wait for thread exit
501        thread_cond_wait(&thread->exit_condition, &thread->exit_lock);
502    }
503
504    if(retval != NULL) {
505        *retval = thread->return_value;
506    }
507
508    thread_mutex_unlock(&thread->exit_lock);    // Not really needed
509    free_thread(thread);
510
511    return SYS_ERR_OK;
512}
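
/* Minimal usage sketch (not compiled) for thread_create()/thread_join():
 * spawn a worker on this dispatcher and wait for its integer return value.
 * The worker function and argument below are hypothetical. */
#if 0
static int example_worker(void *arg)
{
    return *(int *)arg + 1;
}

static void example_create_and_join(void)
{
    int arg = 41, result = 0;
    struct thread *t = thread_create(example_worker, &arg);
    assert(t != NULL);
    errval_t err = thread_join(t, &result);   // frees the thread's state on success
    assert(err_is_ok(err) && result == 42);
}
#endif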
513
514/**
515 * \brief Detach a thread. Free its state when it terminates.
516 *
517 * \param thread        Pointer to thread to detach
518 *
519 * \return SYS_ERR_OK on success.
520 */
521errval_t thread_detach(struct thread *thread)
522{
523    assert(thread != NULL);
524    thread_mutex_lock(&thread->exit_lock);
525
526    if(thread->joining) {
        // Someone else is already joining; that's an error
528        thread_mutex_unlock(&thread->exit_lock);
529        return LIB_ERR_THREAD_JOIN;
530    }
531
532    if(!thread->detached) {
533        thread->detached = true;
534    } else {
535        // Detaching more than once is an error
536        thread_mutex_unlock(&thread->exit_lock);
537        return LIB_ERR_THREAD_DETACHED;
538    }
539
540    if(thread->state == THREAD_STATE_EXITED) {
541        // Thread already exited before we detached, clean it up
542        free_thread(thread);
543        return SYS_ERR_OK;
544    }
545
546    thread_mutex_unlock(&thread->exit_lock);
547    return SYS_ERR_OK;
548}
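
/* Sketch (not compiled) of the detached, fire-and-forget pattern: once a
 * thread is detached its state is freed automatically when it exits, so it
 * must not be joined. The helper below is hypothetical. */
#if 0
static int example_background(void *arg)
{
    (void)arg;
    return 0;
}

static void example_fire_and_forget(void)
{
    struct thread *t = thread_create(example_background, NULL);
    assert(t != NULL);
    errval_t err = thread_detach(t);   // after this, never thread_join(t)
    assert(err_is_ok(err));
}
#endif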
549
550/**
551 * \brief Returns the thread pointer to the currently-running thread
552 */
553struct thread *thread_self(void)
554{
555    struct thread *me;
556#if defined(__x86_64__) // XXX: AB's silly little arch-specific optimisation
557    __asm("movq %%fs:0, %0" : "=r" (me));
558#else
559    // it's not necessary to disable, but might be once we do migration
560    bool was_enabled;
561    dispatcher_handle_t handle = disp_try_disable(&was_enabled);
562    struct dispatcher_generic *disp_gen = get_dispatcher_generic(handle);
563    me = disp_gen->current;
564    if (was_enabled)
565        disp_enable(handle);
566#endif
567    return me;
568}
569
570struct thread *thread_self_disabled(void)
571{
572    dispatcher_handle_t handle = curdispatcher();
573    struct dispatcher_generic *disp_gen = get_dispatcher_generic(handle);
574    return disp_gen->current;
575}
576
577uintptr_t thread_id(void)
578{
579    return thread_self()->id;
580}
581
582uintptr_t thread_get_id(struct thread *t)
583{
584    return t->id;
585}
586
587void thread_set_id(uintptr_t id)
588{
589    struct thread *me = thread_self();
590    me->id = id;
591}
592
593uint32_t thread_set_token(struct waitset_chanstate *channel)
594{
595    struct thread *me = thread_self();
596    // generate new token
597    uint32_t outgoing_token = (uint32_t)((me->id << 16) |
598         (me->coreid << 24) | ((me->token_number & 255) << 8)) | 1;
599    assert(me->token == 0);
600    me->token_number++;
601    if (!(me->token_number & 255))
602        me->token_number = 1;
603    me->token = outgoing_token & ~1;    // wait for this token
604    me->channel = channel;              // on that channel
605    return outgoing_token;
606}
607
608void thread_clear_token(struct waitset_chanstate *channel)
609{
610    struct thread *me = thread_self();
611
612    me->token = 0;      // don't wait anymore
613    me->channel = NULL;
614}
615
616uint32_t thread_current_token(void)
617{
618    return thread_self()->token;
619}
620
621void thread_set_outgoing_token(uint32_t token)
622{
623    struct thread *me = thread_self();
624
625    assert(!me->outgoing_token);
626    me->outgoing_token = token;
627}
628
629void thread_get_outgoing_token(uint32_t *token)
630{
631    struct thread *me = thread_self();
632    // if thread's outgoing token is set, get it
633    if (me->outgoing_token) {
634        *token = me->outgoing_token;
635        me->outgoing_token = 0;
636    }
637}
638
639void thread_set_local_trigger(struct waitset_chanstate *trigger)
640{
641    struct thread *me = thread_self();
642    me->local_trigger = trigger;
643}
644
645struct waitset_chanstate * thread_get_local_trigger(void)
646{
647    struct thread *me = thread_self();
648    return me->local_trigger;
649}
650
651void thread_set_rpc_in_progress(bool v)
652{
653    thread_self()->rpc_in_progress = v;
654}
655
656bool thread_get_rpc_in_progress(void)
657{
658    return thread_self()->rpc_in_progress;
659}
660
661void thread_set_async_error(errval_t e)
662{
663    thread_self()->async_error = e;
664}
665
666errval_t thread_get_async_error(void)
667{
668    return thread_self()->async_error;
669}
670
671/**
672 * \brief Store receive slot provided by rpc
673 */
674
675void thread_store_recv_slot(struct capref recv_slot)
676{
677    dispatcher_handle_t handle = disp_disable();
678    struct dispatcher_generic *disp_gen = get_dispatcher_generic(handle);
679
680    assert(disp_gen->recv_slot_count < MAX_RECV_SLOTS);
681    assert(disp_gen->recv_slot_count >= 0);
682    disp_gen->recv_slots[disp_gen->recv_slot_count++] = recv_slot;
683
684    disp_enable(handle);
685}
686
687struct capref thread_get_next_recv_slot(void)
688{
689    dispatcher_handle_t handle = disp_disable();
690    struct dispatcher_generic *disp_gen = get_dispatcher_generic(handle);
691    struct capref retcap;
692
693    // HERE: recv_slot_count is > 0 if we have one+ caps stored
694    if (disp_gen->recv_slot_count <= 0) {
695        retcap = NULL_CAP;
696    } else {
697        retcap = disp_gen->recv_slots[--disp_gen->recv_slot_count];
698    }
699    disp_enable(handle);
700    return retcap;
701}
702
703void thread_set_status(int status) {
704    struct thread *me = thread_self();
705    me->return_value = status;
706}
707
708/**
709 * \brief Yield the calling thread
710 *
711 * Switches to the next runnable thread in this dispatcher, or if none is
712 * available, yields the dispatcher.
713 */
714void thread_yield(void)
715{
716    dispatcher_handle_t handle = disp_disable();
717    struct dispatcher_generic *disp_gen = get_dispatcher_generic(handle);
718    struct dispatcher_shared_generic *disp =
719        get_dispatcher_shared_generic(handle);
720    arch_registers_state_t *enabled_area =
721        dispatcher_get_enabled_save_area(handle);
722
723    struct thread *me = disp_gen->current;
724    struct thread *next = me;
725    me->yield_epoch = disp_gen->timeslice;
726
727    do {
728        assert_disabled(next != NULL);
729        next = next->next;
730        if (next == me) {
731            break; // Everybody yielded this timeslice
732        }
733    } while(next->yield_epoch == disp_gen->timeslice);
734
735    poll_channels_disabled(handle);
736
737    if (next != me) {
738        disp_gen->current = next;
739        disp_switch(handle, &me->regs, &next->regs);
740    } else {
741        assert_disabled(disp_gen->runq != NULL);
742        assert_disabled(disp->haswork);
743        trace_event(TRACE_SUBSYS_THREADS, TRACE_EVENT_THREADS_C_DISP_SAVE, 3);
744        disp_save(handle, enabled_area, true, CPTR_NULL);
745    }
746}
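
/* Sketch (not compiled): a cooperative busy-wait that repeatedly yields the
 * calling thread until a flag set by another thread becomes true. The flag is
 * hypothetical; real code would normally block on a condition variable
 * instead of spinning. */
#if 0
static volatile bool example_ready;

static void example_wait_until_ready(void)
{
    while (!example_ready) {
        thread_yield();   // let other runnable threads (or the kernel) make progress
    }
}
#endif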
747
748/**
749 * \brief Yield both the calling thread, and the dispatcher to another domain
750 *
751 * \param endpoint Endpoint cap to which we wish to yield, or #CAP_NULL
752 *                  for an undirected yield
753 *
754 * Yields the dispatcher, optionally to another specified dispatcher.
755 */
756void thread_yield_dispatcher(struct capref endpoint)
757{
758    dispatcher_handle_t handle = disp_disable();
759    struct dispatcher_generic *disp_gen = get_dispatcher_generic(handle);
760    struct dispatcher_shared_generic *disp =
761        get_dispatcher_shared_generic(handle);
762    arch_registers_state_t *enabled_area =
763        dispatcher_get_enabled_save_area(handle);
764
765    assert_disabled(disp_gen->runq != NULL);
766    assert_disabled(disp->haswork);
767
768    trace_event(TRACE_SUBSYS_THREADS, TRACE_EVENT_THREADS_C_DISP_SAVE, 1);
769    disp_save(handle, enabled_area, true, get_cap_addr(endpoint));
770}
771
772/// Function that runs on the static thread/stack to clean up a "real" (alloced) thread
773static int cleanup_thread(void *arg)
774{
775    struct thread *thread = arg;
776
777    // free old thread and its stack
778    if (thread != NULL) {
779        free_thread(thread);
780    }
781
782    // disable and release static thread
783    dispatcher_handle_t handle = disp_disable();
784    struct dispatcher_generic *disp_gen = get_dispatcher_generic(handle);
785    struct dispatcher_shared_generic *disp =
786        get_dispatcher_shared_generic(handle);
787    struct thread *me = disp_gen->current;
788    struct thread *ft =
789        thread_mutex_unlock_disabled(handle, &disp_gen->cleanupthread_lock);
790    assert(ft == NULL);
791
792    // run the next thread, if any
793    struct thread *next = me->next;
794    thread_remove_from_queue(&disp_gen->runq, me);
795    if (next != me) {
796        disp_gen->current = next;
797        disp_resume(handle, &next->regs);
798    } else {
799        disp_gen->current = NULL;
800        disp->haswork = havework_disabled(handle);
801        disp_yield_disabled(handle);
802    }
803
804    return 0;
805}
806
807/**
808 * \brief Terminate the calling thread
809 */
810void thread_exit(int status)
811{
812    struct thread *me = thread_self();
813
814    thread_mutex_lock(&me->exit_lock);
815
816    // if this is the static thread, we don't need to do anything but cleanup
817    if (me == &staticthread) {
818        assert(me->detached);
819        // disable and release static thread
820        dispatcher_handle_t handle = disp_disable();
821        struct dispatcher_generic *disp_gen = get_dispatcher_generic(handle);
822        struct dispatcher_shared_generic *disp =
823            get_dispatcher_shared_generic(handle);
824        assert_disabled(me == &staticthread);
825        assert_disabled(me->stack == staticstack);
826        struct thread *ft =
827            thread_mutex_unlock_disabled(handle, &staticthread_lock);
828        assert(ft == NULL);
829
830        // run the next thread, if any
831        struct thread *next = me->next;
832        thread_remove_from_queue(&disp_gen->runq, me);
833        if (next != me) {
834            disp_gen->current = next;
835            disp_resume(handle, &next->regs);
836        } else {
837            disp_gen->current = NULL;
838            disp->haswork = havework_disabled(handle);
839            disp_yield_disabled(handle);
840        }
841    }
842
843    if (me->detached) {
844        // otherwise, we use a dispatcher-local thread to perform cleanup
845        struct dispatcher_generic *dg = get_dispatcher_generic(curdispatcher());
846        thread_mutex_lock(&dg->cleanupthread_lock);
847        if(dg->cleanupthread == NULL) {
848            dg->cleanupthread =
849                thread_create_unrunnable(cleanup_thread, me,
850                                         THREADS_DEFAULT_STACK_BYTES);
851        }
852        thread_init(curdispatcher(), dg->cleanupthread);
853
854        registers_set_initial(&dg->cleanupthread->regs, dg->cleanupthread,
855                              (lvaddr_t)cleanup_thread,
856                              (lvaddr_t)dg->cleanupthread->stack_top, (lvaddr_t)me,
857                              0, 0, 0);
858
859        // Switch to it (on this dispatcher)
860        dispatcher_handle_t handle = disp_disable();
861        struct dispatcher_generic *disp_gen = get_dispatcher_generic(handle);
862
863        thread_remove_from_queue(&disp_gen->runq, me);
864        thread_enqueue(dg->cleanupthread, &disp_gen->runq);
865        disp_gen->cleanupthread->disp = handle;
866        disp_gen->current = dg->cleanupthread;
867        disp_resume(handle, &dg->cleanupthread->regs);
868    } else {
869        // We're not detached -- wakeup joiner
870        me->return_value = status;
871        me->state = THREAD_STATE_EXITED;
872        thread_cond_signal(&me->exit_condition);
873
874        // Disable and unlock exit lock
875        dispatcher_handle_t handle = disp_disable();
876        struct thread *wakeup =
877            thread_mutex_unlock_disabled(handle, &me->exit_lock);
878        struct dispatcher_generic *disp_gen = get_dispatcher_generic(handle);
879        struct dispatcher_shared_generic *disp =
880            get_dispatcher_shared_generic(handle);
881
882        assert_disabled(wakeup == NULL);
883
884        // run the next thread, if any
885        struct thread *next = me->next;
886        thread_remove_from_queue(&disp_gen->runq, me);
887        if (next != me) {
888            disp_gen->current = next;
889            disp_resume(handle, &next->regs);
890        } else {
891            disp_gen->current = NULL;
892            disp->haswork = havework_disabled(handle);
893            disp_yield_disabled(handle);
894        }
895    }
896
897    USER_PANIC("should never be reached");
898}
899
900/**
901 * \brief Block the caller, and optionally release a spinlock, while disabled
902 *
903 * The caller is unconditionally blocked, and placed into the given queue
904 * pending a call that will unblock it. After manipulating the queues, and
 * before switching threads, the given spinlock, if specified, is unlocked.
906 * This function must only be called while disabled.
907 *
908 * This function is intended for use by multi-processor thread synchronisation
909 * functions.
910 *
 * \param handle Dispatcher handle
912 * \param queue (Optional) Queue of threads in which to place caller
913 * \param spinlock (Optional) pointer to spinlock
914 *
915 * \returns Argument passed to thread_unblock, when unblocked
916 */
917void *thread_block_and_release_spinlock_disabled(dispatcher_handle_t handle,
918                                                 struct thread **queue,
919                                                 spinlock_t *spinlock)
920{
921    struct dispatcher_shared_generic *disp =
922        get_dispatcher_shared_generic(handle);
923    struct dispatcher_generic *disp_gen = get_dispatcher_generic(handle);
924    struct thread *me = disp_gen->current;
925    struct thread *next = me->next;
926    assert_disabled(next != NULL);
927
928    assert_disabled(me->state == THREAD_STATE_RUNNABLE);
929    me->state = THREAD_STATE_BLOCKED;
930
931    thread_remove_from_queue(&disp_gen->runq, me);
932    if (queue != NULL) {
933        thread_enqueue(me, queue);
934    }
935
936    if (spinlock != NULL) {
937        release_spinlock(spinlock);
938    }
939
940    if (next != me) {
941        assert_disabled(disp_gen->runq != NULL);
942        disp_gen->current = next;
943        disp_switch(handle, &me->regs, &next->regs);
944    } else {
945        assert_disabled(disp_gen->runq == NULL);
946        disp_gen->current = NULL;
947        disp->haswork = havework_disabled(handle);
948        trace_event(TRACE_SUBSYS_THREADS, TRACE_EVENT_THREADS_C_DISP_SAVE, 2);
949        disp_save(handle, &me->regs, true, CPTR_NULL);
950    }
951
952    assert(me->disp == handle); // didn't migrate while asleep
953    return me->wakeup_reason;
954}
955
956/**
957 * \brief Block the calling thread, while disabled
958 *
959 * The caller is unconditionally blocked, and placed into the given queue
960 * pending a call that will unblock it.
961 * This function must only be called while disabled.
962 *
963 * \param disp Dispatcher pointer
964 * \param queue Queue of threads in which to place caller
965 *
966 * \returns Argument passed to thread_unblock, when unblocked
967 */
968void *thread_block_disabled(dispatcher_handle_t disp, struct thread **queue)
969{
970    return thread_block_and_release_spinlock_disabled(disp, queue, NULL);
971}
972
973/**
974 * \brief Block the calling thread, while enabled
975 *
976 * The caller is unconditionally blocked, and placed into the given queue
977 * pending a call that will unblock it.
978 * This function must only be called while enabled.
979 *
980 * \param queue Queue of threads in which to place caller
981 *
982 * \returns Argument passed to thread_unblock, when unblocked
983 */
984void *thread_block(struct thread **queue)
985{
986    return thread_block_disabled(disp_disable(), queue);
987}
988
989/**
990 * \brief Unblock a single thread from a given queue, while disabled
991 *
992 * A single thread is removed from the queue of blocked threads, and awoken.
993 * This function must only be called while disabled.
994 *
 * \param handle Dispatcher handle
996 * \param queue  Queue of threads from which to unblock one
997 * \param reason Value to be returned from thread_block()
998 *
999 * \returns Pointer to thread to be woken on a foreign dispatcher
1000 */
1001struct thread *thread_unblock_one_disabled(dispatcher_handle_t handle,
1002                                           struct thread **queue,
1003                                           void *reason)
1004{
1005    assert_disabled(queue != NULL);
1006
1007    // Any threads in queue?
1008    if (*queue == NULL) {
1009        return NULL;
1010    }
1011
1012    // Wakeup one waiting thread
1013    struct thread *wakeup = thread_dequeue(queue);
1014    wakeup->wakeup_reason = reason;
1015    assert_disabled(wakeup->state == THREAD_STATE_BLOCKED);
1016    wakeup->state = THREAD_STATE_RUNNABLE;
1017
1018    /* enqueue on run queue if it's "our" thread, and not paused */
1019    if (wakeup->disp == handle) {
1020        if (!wakeup->paused) {
1021            struct dispatcher_generic *disp_gen = get_dispatcher_generic(handle);
1022            thread_enqueue(wakeup, &disp_gen->runq);
1023        }
1024        return NULL;
1025    } else {
1026        return wakeup;
1027    }
1028}
1029
1030/**
1031 * \brief Unblock a single thread from a given queue, while enabled
1032 *
1033 * A single thread is removed from the queue of blocked threads, and awoken.
1034 * This function must only be called while enabled.
1035 *
1036 * \param queue  Queue of threads from which to unblock one
1037 * \param reason Value to be returned from thread_block()
1038 *
1039 * \returns Pointer to thread to be woken on a foreign dispatcher
1040 */
1041struct thread *thread_unblock_one(struct thread **queue, void *reason)
1042{
1043    struct thread *thread;
1044
1045    dispatcher_handle_t handle = disp_disable();
1046    thread = thread_unblock_one_disabled(handle, queue, reason);
1047    disp_enable(handle);
1048    return thread;
1049}
1050
1051/**
1052 * \brief Unblock all threads on a given queue, while disabled
1053 *
1054 * All threads on the queue of blocked threads are awoken.
1055 * This function must only be called while disabled.
1056 *
 * \param handle Dispatcher handle
1058 * \param queue  Queue of threads to unblock
1059 * \param reason Value to be returned from thread_block()
1060 *
1061 * \returns Pointer to list of threads to be woken on a foreign dispatcher
1062 */
1063struct thread *thread_unblock_all_disabled(dispatcher_handle_t handle,
1064                                           struct thread **queue, void *reason)
1065{
1066    assert_disabled(queue != NULL);
1067    struct thread *wakeupq = NULL;
1068
1069    // Wakeup all waiting threads
1070    while (*queue != NULL) {
1071        struct thread *wakeup = thread_unblock_one_disabled(handle, queue, reason);
1072        if (wakeup != NULL) {
1073            wakeup->next = wakeupq;
1074            wakeupq = wakeup;
1075        }
1076    }
1077
1078    return wakeupq;
1079}
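
/* Sketch (not compiled) of how the block/unblock primitives above can back a
 * simple wait-queue style synchronisation object on a single dispatcher. A
 * multi-dispatcher primitive must additionally forward any non-NULL thread
 * returned by thread_unblock_one() to its owning dispatcher; that step is
 * omitted here. The event structure is hypothetical. */
#if 0
struct example_event {
    struct thread *waiters;   // queue of blocked threads
};

static void *example_event_wait(struct example_event *ev)
{
    // blocks the calling thread until example_event_signal() wakes it
    return thread_block(&ev->waiters);
}

static void example_event_signal(struct example_event *ev, void *reason)
{
    struct thread *foreign = thread_unblock_one(&ev->waiters, reason);
    assert(foreign == NULL);   // single-dispatcher sketch: no remote wakeup needed
}
#endif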
1080
1081extern int _main(int argc, const char *argv[]);
1082
1083/// Thread created in new domain that runs main()
1084static int main_thread(void *params)
1085{
1086    struct spawn_domain_params *p = params;
1087    exit(_main(p->argc, p->argv));
1088    return EXIT_FAILURE;
1089}
1090
1091static bool init_domain_global; // XXX
1092
1093/// Thread created on static stack in new domain that runs init code
1094static int bootstrap_thread(struct spawn_domain_params *params)
1095//int bootstrap_thread(struct spawn_domain_params *params);
1096//int bootstrap_thread(struct spawn_domain_params *params)
1097{
1098    errval_t err;
1099
1100    // Set libc function pointers
1101    barrelfish_libc_glue_init();
1102
1103    if (params == NULL) {
1104        printf("%s: error in creating a thread, NULL parameters given\n",
1105                disp_name());
1106    }
1107    assert(params != NULL);
1108
1109    // Do we have TLS data?
1110    tls_block_init_base = params->tls_init_base;
1111    tls_block_init_len = params->tls_init_len;
1112    tls_block_total_len = params->tls_total_len;
1113
1114    // Initialize subsystems
1115    err = barrelfish_init_onthread(params);
1116    if (err_is_fail(err)) {
1117        DEBUG_ERR(err, "error during libbarrelfish init");
1118        exit(EXIT_FAILURE);
1119        assert(!"exit returned!");
1120    }
1121
1122    // Allocate storage region for real threads
1123    size_t blocksize = sizeof(struct thread) + tls_block_total_len + THREAD_ALIGNMENT;
1124    err = vspace_mmu_aware_init(&thread_slabs_vm, MAX_THREADS * blocksize);
1125    if (err_is_fail(err)) {
1126        USER_PANIC_ERR(err, "vspace_mmu_aware_init for thread region failed\n");
1127    }
1128    slab_init(&thread_slabs, blocksize, refill_thread_slabs);
1129
1130    if (init_domain_global) {
1131        // run main() on this thread, since we can't allocate
1132        if (tls_block_total_len > 0) {
1133            USER_PANIC("unsupported: use of TLS data in bootstrap domain\n");
1134        }
1135        main_thread(params);
1136    } else {
1137        // Start real thread to run main()
1138        struct thread *thread = thread_create(main_thread, params);
1139        assert(thread != NULL);
1140    }
1141
1142    return 0; // ignored
1143}
1144
1145/**
1146 * \brief Initialise thread system while still disabled
1147 *
1148 * This function initialises the thread system while the dispatcher is still
1149 * disabled, before enabling the dispatcher, running the general initialisation
1150 * code, and calling main().
1151 *
 * \param handle Dispatcher handle
1153 * \param init_domain True if we are a bootstrap domain
1154 */
1155void thread_init_disabled(dispatcher_handle_t handle, bool init_domain)
1156{
1157    struct dispatcher_shared_generic *disp =
1158        get_dispatcher_shared_generic(handle);
1159    struct dispatcher_generic *disp_gen = get_dispatcher_generic(handle);
1160    arch_registers_state_t *enabled_area =
1161        dispatcher_get_enabled_save_area(handle);
1162
1163    init_domain_global = init_domain;
1164
1165    // Create the first thread manually
1166    struct thread *thread = &staticthread;
1167    staticthread_lock.locked = true; // XXX: safe while disabled
1168
1169    // waste space for alignment, if unaligned
1170    thread->stack_top = (char *)thread->stack_top
1171        - (lvaddr_t)thread->stack_top % STACK_ALIGNMENT;
1172
1173    // Initialise the first (static) thread
1174    thread_init(handle, thread);
1175    thread->detached = true;
1176
1177#if defined(__x86_64__)
1178    // create segment for TCB
1179    errval_t err = ldt_alloc_segment_disabled(handle, thread,
1180                                              &thread->thread_seg_selector);
1181    if (err_is_fail(err)) {
1182        USER_PANIC_ERR(err, "error allocating LDT segment for first thread");
1183    }
1184#endif
1185
1186    uintptr_t param;
1187    registers_get_param(enabled_area, &param);
1188
1189    registers_set_initial(&thread->regs, thread, (lvaddr_t)thread_entry,
1190                          /* TODO: pass stack base and limit, choose in arch
1191                           * code (possibly setting up other hints on stack) */
1192                          (lvaddr_t)thread->stack_top,
1193                          (lvaddr_t)bootstrap_thread, param, 0, 0);
1194
1195    // Switch to it (always on this dispatcher)
1196    thread->disp = handle;
1197    thread_enqueue(thread, &disp_gen->runq);
1198    disp_gen->current = thread;
1199    disp->haswork = true;
1200    disp_resume(handle, &thread->regs);
1201}
1202
1203/**
1204 * \brief Called on the remote core when spanning a domain across cores
1205 *
1206 * Runs the provided thread after enqueuing it and enabling the dispatcher
1207 */
1208void thread_init_remote(dispatcher_handle_t handle, struct thread *thread)
1209{
1210    struct dispatcher_shared_generic *disp =
1211        get_dispatcher_shared_generic(handle);
1212    struct dispatcher_generic *disp_gen = get_dispatcher_generic(handle);
1213    thread_enqueue(thread, &disp_gen->runq);
1214    disp_gen->current = thread;
1215    disp->haswork = true;
1216    disp_resume(handle, &thread->regs);
1217}
1218
1219/**
1220 * \brief Prepare to span the current domain
1221 *
1222 * This is a kludge. It is called from domain.c when creating a new dispatcher,
1223 * and is responsible for pre-allocating all the storage that might be needed
1224 * for thread metadata in the slab allocator. It can go away once we sanely
1225 * manage the vspace across multiple dispatchers in a domain.
1226 */
1227void threads_prepare_to_span(dispatcher_handle_t newdh)
1228{
1229    static bool called;
1230
1231    if (!called) {
1232        called = true;
1233
1234        thread_mutex_lock(&thread_slabs_mutex);
1235        acquire_spinlock(&thread_slabs_spinlock);
1236
1237        while (slab_freecount(&thread_slabs) < MAX_THREADS - 1) {
1238            size_t size;
1239            void *buf;
1240            errval_t err;
1241
1242            size_t blocksize = sizeof(struct thread) + tls_block_total_len;
1243            err = vspace_mmu_aware_map(&thread_slabs_vm, 64 * blocksize,
1244                                       &buf, &size);
1245            if (err_is_fail(err)) {
1246                if (err_no(err) == LIB_ERR_VSPACE_MMU_AWARE_NO_SPACE) {
1247                    // we've wasted space with fragmentation
1248                    // cross our fingers and hope for the best...
1249                    break;
1250                }
1251                USER_PANIC_ERR(err, "in vspace_mmu_aware_map while prefilling "
1252                               "thread slabs\n");
1253            }
1254
1255            slab_grow(&thread_slabs, buf, size);
1256        }
1257
1258        release_spinlock(&thread_slabs_spinlock);
1259        thread_mutex_unlock(&thread_slabs_mutex);
1260    }
1261}
1262
1263/**
1264 * \brief Pause (suspend execution of) the given thread, and optionally capture its register state
1265 *
1266 * The thread will not be run, until a subsequent call to thread_resume()
1267 */
1268void thread_pause_and_capture_state(struct thread *thread,
1269                                    arch_registers_state_t **ret_regs)
1270{
1271    assert(thread != NULL);
1272    dispatcher_handle_t dh = disp_disable();
1273    struct dispatcher_generic *disp = get_dispatcher_generic(dh);
1274    if (thread->disp == dh) {
1275        if (!thread->paused) {
1276            thread->paused = true;
1277            if (thread == disp->current) { // doesn't make much sense...
1278                sys_print("Warning: pausing current thread!\n",100);
1279                assert_disabled(thread->state == THREAD_STATE_RUNNABLE);
1280                thread_block_disabled(dh, NULL);
1281            } else if (thread->state == THREAD_STATE_RUNNABLE) {
1282                thread_remove_from_queue(&disp->runq, thread);
1283            }
1284        }
1285        if (ret_regs != NULL) {
1286            *ret_regs = &thread->regs;
1287        }
1288    } else {
1289        USER_PANIC("NYI: remote dispatcher thread_pause()");
1290    }
1291    disp_enable(dh);
1292}
1293
1294/**
1295 * \brief Pause (suspend execution of) the given thread
1296 *
1297 * The thread will not be run, until a subsequent call to thread_resume()
1298 */
1299void thread_pause(struct thread *thread)
1300{
1301    thread_pause_and_capture_state(thread, NULL);
1302}
1303
1304/**
1305 * \brief Resume execution of a thread previously suspended by thread_pause()
1306 */
1307void thread_resume(struct thread *thread)
1308{
1309    assert(thread != NULL);
1310    dispatcher_handle_t dh = disp_disable();
1311    struct dispatcher_generic *disp = get_dispatcher_generic(dh);
1312    if (thread->disp == dh) {
1313        if (thread->paused) {
1314            thread->paused = false;
1315            if (thread->state == THREAD_STATE_RUNNABLE) {
1316                thread_enqueue(thread, &disp->runq);
1317            }
1318        }
1319    } else {
1320        USER_PANIC("NYI: remote dispatcher thread_resume()");
1321    }
1322    disp_enable(dh);
1323}
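
/* Sketch (not compiled): temporarily suspending a peer thread on the same
 * dispatcher while inspecting its captured register state. */
#if 0
static void example_inspect_paused(struct thread *t)
{
    arch_registers_state_t *regs;
    thread_pause_and_capture_state(t, &regs);
    // ... examine *regs here; t will not run again until resumed ...
    thread_resume(t);
}
#endif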
1324
1325/**
1326 * \brief Set old-style thread-local storage pointer.
1327 * \param p   User's pointer
1328 */
1329void thread_set_tls(void *p)
1330{
1331    struct thread *me = thread_self();
1332    me->userptr = p;
1333}
1334
1335void thread_set_tls_key(int key, void *p)
1336{
1337    struct thread *me = thread_self();
1338    me->userptrs[key] = p;
1339}
1340
1341/**
1342 * \brief Return old-style thread-local storage pointer.
1343 * \return User's pointer, previously passed to thread_set_tls()
1344 */
1345void *thread_get_tls(void)
1346{
1347    struct thread *me = thread_self();
1348    return me->userptr;
1349}
1350
1351void *thread_get_tls_key(int key)
1352{
1353    struct thread *me = thread_self();
1354    return me->userptrs[key];
1355}
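
/* Sketch (not compiled) of the old-style per-thread pointer: each thread
 * stores and retrieves its own value independently of all other threads. */
#if 0
static int example_tls_user(void *arg)
{
    thread_set_tls(arg);            // visible only to this thread
    assert(thread_get_tls() == arg);
    return 0;
}
#endif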
1356
1357/**
1358 * \brief Set the exception handler function for the current thread.
1359 *        Optionally also change its stack, and return the old values.
1360 *
1361 * \param newhandler New exception handler. Pass NULL to disable an existing handler.
1362 * \param oldhandler If non-NULL, returns previous exception handler
1363 * \param new_stack_base If non-NULL, sets a new exception handler stack (base)
1364 * \param new_stack_top  If non-NULL, sets a new exception handler stack (top)
1365 * \param old_stack_base If non-NULL, returns previous stack base
1366 * \param old_stack_top If non-NULL, returns previous stack top
1367 */
1368errval_t thread_set_exception_handler(exception_handler_fn newhandler,
1369                                      exception_handler_fn *oldhandler,
1370                                      void *new_stack_base, void *new_stack_top,
1371                                      void **old_stack_base, void **old_stack_top)
1372{
1373    struct thread *me = thread_self();
1374
1375    if (oldhandler != NULL) {
1376        *oldhandler = me->exception_handler;
1377    }
1378
1379    if (old_stack_base != NULL) {
1380        *old_stack_base = me->exception_stack;
1381    }
1382
1383    if (old_stack_top != NULL) {
1384        *old_stack_top = me->exception_stack_top;
1385    }
1386
1387    me->exception_handler = newhandler;
1388
1389    if (new_stack_base != NULL && new_stack_top != NULL) {
1390        me->exception_stack = new_stack_base;
1391        me->exception_stack_top = new_stack_top;
1392    }
1393
1394    return SYS_ERR_OK;
1395}
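
/* Sketch (not compiled) of installing a per-thread exception handler with its
 * own stack. The handler signature follows the call made by
 * exception_handler_wrapper() below; the handler body and stack size here are
 * hypothetical. */
#if 0
static char example_exn_stack[4 * 8192];

static void example_exn_handler(enum exception_type type, int subtype,
                                void *addr, arch_registers_state_t *regs)
{
    debug_printf("exception %d/%d at %p\n", (int)type, subtype, addr);
}

static void example_install_exn_handler(void)
{
    errval_t err = thread_set_exception_handler(
        example_exn_handler, NULL,
        example_exn_stack, example_exn_stack + sizeof(example_exn_stack),
        NULL, NULL);
    assert(err_is_ok(err));
}
#endif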
1396
1397static void exception_handler_wrapper(arch_registers_state_t *cpuframe,
1398                                      uintptr_t hack_arg, void *addr)
1399{
1400    struct thread *me = thread_self();
1401
1402    assert(me->in_exception);
1403    assert(me->exception_handler != NULL);
1404
1405    // XXX: unpack hack arg
1406    enum exception_type type = hack_arg >> 16;
1407    int subtype = hack_arg & 0xffff;
1408
1409    // run handler
1410    me->exception_handler(type, subtype, addr, cpuframe);
1411
1412    // resume state
1413    dispatcher_handle_t dh = disp_disable();
1414    struct dispatcher_generic *disp_gen = get_dispatcher_generic(dh);
1415    //memcpy(&me->regs, cpuframe, sizeof(arch_registers_state_t));
1416
1417    assert_disabled(me->in_exception);
1418    me->in_exception = false;
1419
1420    assert_disabled(disp_gen->current == me);
1421    disp_resume(dh, cpuframe);
1422}
1423
1424#if 0
1425void thread_debug_regs(struct thread *t);
1426void thread_debug_regs(struct thread *t)
1427{
1428  printf("%d: RIP = %lx, RSP = %lx\n", disp_get_domain_id(),
1429	 t->regs.rip, t->regs.rsp);
1430  uint64_t *stack = (uint64_t *)t->regs.rsp;
1431  printf("%d: ", disp_get_domain_id());
1432  for(int i = 0; i < 30; i++) {
1433    printf("%lx ", stack[i]);
1434  }
1435  printf("\n");
1436}
1437#endif
1438
1439/**
1440 * \brief Deliver an exception to the current thread, and resume.
1441 *
1442 * This may only be called from the dispatcher (on its stack and while
1443 * disabled!).
1444 *
1445 * \param handle Dispatcher handle
1446 * \param type   Exception type
1447 * \param subtype Exception subtype
1448 * \param addr   Exception address
1449 * \param regs   CPU register state at time of exception
1450 */
1451void thread_deliver_exception_disabled(dispatcher_handle_t handle,
1452                                       enum exception_type type, int subtype,
1453                                       void *addr, arch_registers_state_t *regs)
1454{
1455    struct dispatcher_generic *disp_gen = get_dispatcher_generic(handle);
1456    struct thread *thread = disp_gen->current;
1457    assert_disabled(thread != NULL);
1458    assert_disabled(disp_gen->runq != NULL);
1459
1460    // can we deliver the exception?
1461    if (thread->exception_handler == NULL || thread->exception_stack_top == NULL
1462        || thread->in_exception) {
1463        if (thread->in_exception) {
1464            sys_print("Can't deliver exception to thread: already in handler\n",
1465                      100);
1466        } else {
1467            sys_print("Can't deliver exception to thread: handler not set\n",
1468                      100);
1469        }
1470
1471        // warn on stack overflow.
1472        lvaddr_t sp = (lvaddr_t) registers_get_sp(regs);
1473        if (sp < (lvaddr_t)thread->stack ||
1474            sp > (lvaddr_t)thread->stack_top) {
1475            char str[256];
1476            snprintf(str, sizeof(str), "Error: stack bounds exceeded: sp = 0x%"
1477                     PRIxPTR " but [bottom, top] = [0x%" PRIxPTR ", 0x%"
1478                     PRIxPTR "]\n", (lvaddr_t) sp, (lvaddr_t) thread->stack,
1479                     (lvaddr_t) thread->stack_top);
1480            sys_print(str, sizeof(str));
1481        }
1482
1483        // TODO: actually delete the thread!
1484        disp_gen->current = NULL;
1485        thread_remove_from_queue(&disp_gen->runq, thread);
1486        return;
1487    }
1488
1489    thread->in_exception = true;
1490
1491    lvaddr_t stack_top = (lvaddr_t)thread->exception_stack_top;
1492
1493    // save thread's state at time of fault on top of exception stack
1494    stack_top -= sizeof(arch_registers_state_t);
1495    arch_registers_state_t *cpuframe = (void *)stack_top;
1496    memcpy(cpuframe, regs, sizeof(arch_registers_state_t));
1497
1498    // align stack
1499    stack_top -= stack_top % STACK_ALIGNMENT;
1500
1501    // XXX: sanity-check to ensure we have a sensible amount of exception stack left
1502    assert_disabled(stack_top > (lvaddr_t)thread->exception_stack + 8192);
1503
1504    // XXX: pack two small ints together to fit into a single register
1505    uintptr_t hack_arg = (uintptr_t)type << 16 | (subtype & 0xffff);
1506
1507    registers_set_initial(&thread->regs, thread,
1508                          (lvaddr_t)exception_handler_wrapper,
1509                          stack_top, (lvaddr_t)cpuframe, 0,
1510                          hack_arg, (lvaddr_t)addr);
1511
1512    disp_resume(handle, &thread->regs);
1513}
1514