// Copyright 2016 The Fuchsia Authors
//
// Use of this source code is governed by a MIT-style
// license that can be found in the LICENSE file or at
// https://opensource.org/licenses/MIT

#include <object/thread_dispatcher.h>

#include <assert.h>
#include <err.h>
#include <inttypes.h>
#include <platform.h>
#include <string.h>
#include <trace.h>

#include <arch/debugger.h>
#include <arch/exception.h>

#include <kernel/thread.h>
#include <vm/kstack.h>
#include <vm/vm.h>
#include <vm/vm_address_region.h>
#include <vm/vm_aspace.h>
#include <vm/vm_object_paged.h>

#include <zircon/rights.h>
#include <zircon/syscalls/debug.h>
#include <zircon/types.h>

#include <object/c_user_thread.h>
#include <object/excp_port.h>
#include <object/handle.h>
#include <object/job_dispatcher.h>
#include <object/process_dispatcher.h>

#include <fbl/algorithm.h>
#include <fbl/alloc_checker.h>
#include <fbl/auto_call.h>
#include <fbl/auto_lock.h>

#define LOCAL_TRACE 0

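// Create a ThreadDispatcher for |process|. On success this also runs
// Initialize() and returns the new dispatcher along with the default
// thread rights.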
// static
zx_status_t ThreadDispatcher::Create(fbl::RefPtr<ProcessDispatcher> process, uint32_t flags,
                                     fbl::StringPiece name,
                                     fbl::RefPtr<Dispatcher>* out_dispatcher,
                                     zx_rights_t* out_rights) {
    fbl::AllocChecker ac;
    auto disp = fbl::AdoptRef(new (&ac) ThreadDispatcher(fbl::move(process), flags));
    if (!ac.check())
        return ZX_ERR_NO_MEMORY;

    auto result = disp->Initialize(name.data(), name.length());
    if (result != ZX_OK)
        return result;

    *out_rights = ZX_DEFAULT_THREAD_RIGHTS;
    *out_dispatcher = fbl::move(disp);
    return ZX_OK;
}

ThreadDispatcher::ThreadDispatcher(fbl::RefPtr<ProcessDispatcher> process,
                                   uint32_t flags)
    : process_(fbl::move(process)) {
    LTRACE_ENTRY_OBJ;
}

ThreadDispatcher::~ThreadDispatcher() {
    LTRACE_ENTRY_OBJ;

    DEBUG_ASSERT(&thread_ != get_current_thread());

    switch (state_) {
    case State::DEAD: {
        // join the LK thread before doing anything else to clean up LK state and ensure
        // the thread we're destroying has stopped.
        LTRACEF("joining LK thread to clean up state\n");
        __UNUSED auto ret = thread_join(&thread_, nullptr, ZX_TIME_INFINITE);
        LTRACEF("done joining LK thread\n");
        DEBUG_ASSERT_MSG(ret == ZX_OK, "thread_join returned something other than ZX_OK\n");
        break;
    }
    case State::INITIAL:
        // this gets a pass, we can destruct a partially constructed thread
        break;
    case State::INITIALIZED:
        // as we've been initialized previously, forget the LK thread.
        // note that thread_forget is not called for self since the thread is not running
        thread_forget(&thread_);
        break;
    default:
        DEBUG_ASSERT_MSG(false, "bad state %s, this %p\n", StateToString(state_), this);
    }

    event_destroy(&exception_event_);
}

// complete initialization of the thread object outside of the constructor
zx_status_t ThreadDispatcher::Initialize(const char* name, size_t len) {
    LTRACE_ENTRY_OBJ;

    Guard<fbl::Mutex> guard{get_lock()};

    DEBUG_ASSERT(state_ == State::INITIAL);

    // Make sure LK's max name length agrees with ours.
    static_assert(THREAD_NAME_LENGTH == ZX_MAX_NAME_LEN, "name length issue");
    if (len >= ZX_MAX_NAME_LEN)
        len = ZX_MAX_NAME_LEN - 1;

    char thread_name[THREAD_NAME_LENGTH];
    memcpy(thread_name, name, len);
    memset(thread_name + len, 0, ZX_MAX_NAME_LEN - len);

    // create an underlying LK thread
    thread_t* lkthread = thread_create_etc(
        &thread_, thread_name, StartRoutine, this, DEFAULT_PRIORITY, nullptr);

    if (!lkthread) {
        TRACEF("error creating thread\n");
        return ZX_ERR_NO_MEMORY;
    }
    DEBUG_ASSERT(lkthread == &thread_);

    // register an event handler with the LK kernel
    thread_set_user_callback(&thread_, &ThreadUserCallback);

    // set the per-thread pointer
    lkthread->user_thread = reinterpret_cast<void*>(this);

    // associate the proc's address space with this thread
    process_->aspace()->AttachToThread(lkthread);

    // we've entered the initialized state
    SetStateLocked(State::INITIALIZED);

    return ZX_OK;
}

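// Set the user-visible thread name. The name is truncated to
// ZX_MAX_NAME_LEN - 1 characters and NUL padded; |name_lock_| excludes
// concurrent readers while the buffer is rewritten.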
zx_status_t ThreadDispatcher::set_name(const char* name, size_t len) {
    canary_.Assert();

    // ignore characters after the first NUL
    len = strnlen(name, len);

    if (len >= ZX_MAX_NAME_LEN)
        len = ZX_MAX_NAME_LEN - 1;

    Guard<SpinLock, IrqSave> guard{&name_lock_};
    memcpy(thread_.name, name, len);
    memset(thread_.name + len, 0, ZX_MAX_NAME_LEN - len);
    return ZX_OK;
}

void ThreadDispatcher::get_name(char out_name[ZX_MAX_NAME_LEN]) const {
    canary_.Assert();

    Guard<SpinLock, IrqSave> guard{&name_lock_};
    memset(out_name, 0, ZX_MAX_NAME_LEN);
    strlcpy(out_name, thread_.name, ZX_MAX_NAME_LEN);
}

// start a thread
zx_status_t ThreadDispatcher::Start(uintptr_t entry, uintptr_t sp,
                                    uintptr_t arg1, uintptr_t arg2,
                                    bool initial_thread) {
    canary_.Assert();

    LTRACE_ENTRY_OBJ;

    is_initial_thread_ = initial_thread;

    Guard<fbl::Mutex> guard{get_lock()};

    if (state_ != State::INITIALIZED)
        return ZX_ERR_BAD_STATE;

    // save the user space entry state
    user_entry_ = entry;
    user_sp_ = sp;
    user_arg1_ = arg1;
    user_arg2_ = arg2;

    // add ourselves to the process, which may fail if the process is in a dead state
    auto ret = process_->AddThread(this, initial_thread);
    if (ret < 0)
        return ret;

    // bump the ref on this object; the LK thread state owns this reference until the LK thread has exited
    AddRef();

    // mark ourselves as running and resume the kernel thread
    SetStateLocked(State::RUNNING);

    thread_.user_tid = get_koid();
    thread_.user_pid = process_->get_koid();
    thread_resume(&thread_);

    return ZX_OK;
}

// called in the context of our thread
void ThreadDispatcher::Exit() {
    canary_.Assert();

    LTRACE_ENTRY_OBJ;

    // only valid to call this on the current thread
    DEBUG_ASSERT(get_current_thread() == &thread_);

    {
        Guard<fbl::Mutex> guard{get_lock()};

        DEBUG_ASSERT(state_ == State::RUNNING || state_ == State::DYING);

        SetStateLocked(State::DYING);
    }

    // exit here
    // this will recurse back to us in ::Exiting()
    thread_exit(0);

    __UNREACHABLE;
}

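// Deliver a kernel kill signal to the thread and move it toward DYING.
// Threads that were never started are left in their current state.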
void ThreadDispatcher::Kill() {
    canary_.Assert();

    LTRACE_ENTRY_OBJ;

    Guard<fbl::Mutex> guard{get_lock()};

    switch (state_) {
    case State::INITIAL:
    case State::INITIALIZED:
        // thread was never started, leave in this state
        break;
    case State::RUNNING:
    case State::SUSPENDED:
        // deliver a kernel kill signal to the thread
        thread_kill(&thread_);

        // enter the dying state
        SetStateLocked(State::DYING);
        break;
    case State::DYING:
    case State::DEAD:
        // already going down
        break;
    }
}

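// Ask the underlying kernel thread to suspend. Suspensions are counted so
// that nested Suspend()/Resume() pairs balance; only the first Suspend()
// actually suspends the thread.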
zx_status_t ThreadDispatcher::Suspend() {
    canary_.Assert();

    LTRACE_ENTRY_OBJ;

    Guard<fbl::Mutex> guard{get_lock()};

    LTRACEF("%p: state %s\n", this, StateToString(state_));

    if (state_ != State::RUNNING && state_ != State::SUSPENDED)
        return ZX_ERR_BAD_STATE;

    DEBUG_ASSERT(suspend_count_ >= 0);
    suspend_count_++;
    if (suspend_count_ == 1)
        return thread_suspend(&thread_);

    // It was already suspended.
    return ZX_OK;
}

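// Balance a previous Suspend(). The underlying thread is only resumed once
// the suspend count drops back to zero.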
zx_status_t ThreadDispatcher::Resume() {
    canary_.Assert();

    LTRACE_ENTRY_OBJ;

    Guard<fbl::Mutex> guard{get_lock()};

    LTRACEF("%p: state %s\n", this, StateToString(state_));

    // TODO(brettw) ZX-1072: The suspend_count_ == 0 check can be removed and converted to an
    // assertion when the bug is fixed. At that point the suspend token shouldn't be calling
    // Resume() unless it has previously suspended the thread. Currently callers can bypass this
    // invariant using the legacy suspend/resume API, so return a useful error instead.
    // DEBUG_ASSERT(suspend_count_ > 0)
    if ((state_ != State::RUNNING && state_ != State::SUSPENDED) || suspend_count_ == 0)
        return ZX_ERR_BAD_STATE;

    suspend_count_--;
    if (suspend_count_ == 0)
        thread_resume(&thread_);
    // Otherwise there's still an outstanding Suspend() call, so keep the thread suspended.
    return ZX_OK;
}

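// DPC callback used by Exiting() to delete the ThreadDispatcher once the
// exiting thread can no longer run the destructor itself.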
static void ThreadCleanupDpc(dpc_t* d) {
    LTRACEF("dpc %p\n", d);

    ThreadDispatcher* t = reinterpret_cast<ThreadDispatcher*>(d->arg);
    DEBUG_ASSERT(t);

    delete t;
}

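// Runs in the context of the exiting thread (via ThreadUserCallback): notify
// any attached debugger, transition to DEAD, detach from the parent process,
// and drop the reference the LK thread held on this object.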
void ThreadDispatcher::Exiting() {
    canary_.Assert();

    LTRACE_ENTRY_OBJ;

    // Notify a debugger if attached. Do this before marking the thread as
    // dead: the debugger expects to see the thread in the DYING state and may
    // try to read thread registers. The debugger still has to handle the case
    // where the process is also dying (and thus the thread could transition
    // DYING->DEAD from underneath it), but that's life (or death :-)).
    // N.B. OnThreadExitForDebugger will block in ExceptionHandlerExchange, so
    // don't hold the process's lock across the call.
    {
        fbl::RefPtr<ExceptionPort> eport(process_->debugger_exception_port());
        if (eport) {
            eport->OnThreadExitForDebugger(this);
        }
    }

    // Mark the thread as dead. Do this before removing the thread from the
    // process because if this is the last thread then the process will be
    // marked dead, and we don't want to have a state where the process is
    // dead but one thread is not.
    {
        Guard<fbl::Mutex> guard{get_lock()};

        DEBUG_ASSERT(state_ == State::DYING);

        // put ourselves into the dead state
        SetStateLocked(State::DEAD);
    }

    // remove ourselves from our parent process's view
    process_->RemoveThread(this);

    // drop LK's reference
    if (Release()) {
        // We're the last reference, so we would need to destruct ourselves while running, which is
        // not possible. Use a DPC to pull this off.
        cleanup_dpc_.func = ThreadCleanupDpc;
        cleanup_dpc_.arg = this;

        // disable interrupts before queuing the dpc to prevent starving the DPC thread if it starts
        // running before we've completed.
        // disabling interrupts effectively raises us to maximum priority on this cpu.
        // note this is only safe because we're about to exit the thread permanently so the context
        // switch will effectively re-enable interrupts in the new thread.
        arch_disable_ints();

        // queue without rescheduling since our exiting is already a reschedule event
        dpc_queue(&cleanup_dpc_, false);
    }

    // after this point the thread will stop permanently
    LTRACE_EXIT_OBJ;
}

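// Callback (via ThreadUserCallback) run in the thread's context as the kernel
// suspends it; records the SUSPENDED state for observers.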
void ThreadDispatcher::Suspending() {
    LTRACE_ENTRY_OBJ;

    // Update the state before sending any notifications out. We want the
    // receiver to see the new state.
    {
        Guard<fbl::Mutex> guard{get_lock()};

        DEBUG_ASSERT(state_ == State::RUNNING || state_ == State::DYING);
        if (state_ == State::RUNNING) {
            SetStateLocked(State::SUSPENDED);
        }
    }

    LTRACE_EXIT_OBJ;
}

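// Counterpart to Suspending(): run in the thread's context as the kernel
// resumes it, moving the dispatcher state back to RUNNING.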
void ThreadDispatcher::Resuming() {
    LTRACE_ENTRY_OBJ;

    // Update the state before sending any notifications out. We want the
    // receiver to see the new state.
    {
        Guard<fbl::Mutex> guard{get_lock()};

        DEBUG_ASSERT(state_ == State::SUSPENDED || state_ == State::DYING);
        if (state_ == State::SUSPENDED) {
            SetStateLocked(State::RUNNING);
        }
    }

    LTRACE_EXIT_OBJ;
}

// low level LK callback in the thread's context for user-thread state changes
void ThreadDispatcher::ThreadUserCallback(enum thread_user_state_change new_state, void* arg) {
    ThreadDispatcher* t = reinterpret_cast<ThreadDispatcher*>(arg);

    switch (new_state) {
    case THREAD_USER_STATE_EXIT:
        t->Exiting();
        return;
    case THREAD_USER_STATE_SUSPEND:
        t->Suspending();
        return;
    case THREAD_USER_STATE_RESUME:
        t->Resuming();
        return;
    }
}

// low level LK entry point for the thread
int ThreadDispatcher::StartRoutine(void* arg) {
    LTRACE_ENTRY;

    ThreadDispatcher* t = reinterpret_cast<ThreadDispatcher*>(arg);

    // Notify job debugger if attached.
    if (t->is_initial_thread_) {
        t->process_->OnProcessStartForJobDebugger(t);
    }

    // Notify debugger if attached.
    // This is done by first obtaining our own reference to the port so the
    // test can be done safely. Note that this function doesn't return so we
    // need the reference to go out of scope before then.
    {
        fbl::RefPtr<ExceptionPort> debugger_port(t->process_->debugger_exception_port());
        if (debugger_port) {
            debugger_port->OnThreadStartForDebugger(t);
        }
    }

    LTRACEF("arch_enter_uspace SP: %#" PRIxPTR " PC: %#" PRIxPTR
            ", ARG1: %#" PRIxPTR ", ARG2: %#" PRIxPTR "\n",
            t->user_sp_, t->user_entry_, t->user_arg1_, t->user_arg2_);

    // switch to user mode and start the process
    arch_enter_uspace(t->user_entry_, t->user_sp_,
                      t->user_arg1_, t->user_arg2_);

    __UNREACHABLE;
}

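// Set |state_| and assert/deassert the matching user-visible signals.
// The dispatcher lock must be held.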
void ThreadDispatcher::SetStateLocked(State state) {
    canary_.Assert();

    LTRACEF("thread %p: state %u (%s)\n", this, static_cast<unsigned int>(state), StateToString(state));

    DEBUG_ASSERT(get_lock()->lock().IsHeld());

    state_ = state;

    switch (state) {
    case State::RUNNING:
        UpdateStateLocked(ZX_THREAD_SUSPENDED, ZX_THREAD_RUNNING);
        break;
    case State::SUSPENDED:
        UpdateStateLocked(ZX_THREAD_RUNNING, ZX_THREAD_SUSPENDED);
        break;
    case State::DEAD:
        UpdateStateLocked(ZX_THREAD_RUNNING | ZX_THREAD_SUSPENDED, ZX_THREAD_TERMINATED);
        break;
    default:
        // Nothing to do.
        // In particular, for the DYING state we don't modify the SUSPENDED
        // or RUNNING signals: For observer purposes they'll only be interested
        // in the transition from {SUSPENDED,RUNNING} to DEAD.
        break;
    }
}

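// Bind a thread-level exception port. Fails if the thread is already dead or
// if a port is already bound.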
zx_status_t ThreadDispatcher::SetExceptionPort(fbl::RefPtr<ExceptionPort> eport) {
    canary_.Assert();

    DEBUG_ASSERT(eport->type() == ExceptionPort::Type::THREAD);

    // Hold the dispatcher lock to ensure the thread doesn't transition to dead
    // while we're setting the exception handler.
    Guard<fbl::Mutex> guard{get_lock()};
    if (state_ == State::DEAD)
        return ZX_ERR_NOT_FOUND;
    if (exception_port_)
        return ZX_ERR_ALREADY_BOUND;
    exception_port_ = eport;

    return ZX_OK;
}

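// Unbind the thread-level exception port, if one is bound. Returns true if a
// port was unbound. Unless |quietly| is set, a thread currently waiting on
// that port is woken with a TRY_NEXT disposition (see OnExceptionPortRemoval).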
bool ThreadDispatcher::ResetExceptionPort(bool quietly) {
    canary_.Assert();

    fbl::RefPtr<ExceptionPort> eport;

    // Remove the exception handler first. If the thread resumes execution
    // we don't want it to hit another exception and get back into
    // ExceptionHandlerExchange.
    {
        Guard<fbl::Mutex> guard{get_lock()};
        exception_port_.swap(eport);
        if (eport == nullptr) {
            // Attempted to unbind when no exception port is bound.
            return false;
        }
        // This method must guarantee that no caller will return until
        // OnTargetUnbind has been called on the port-to-unbind.
        // This becomes important when a manual unbind races with a
        // PortDispatcher::on_zero_handles auto-unbind.
        //
        // If OnTargetUnbind were called outside of the lock, it would lead to
        // a race (for threads A and B):
        //
        //   A: Calls ResetExceptionPort; acquires the lock
        //   A: Sees a non-null exception_port_, swaps it into the eport local.
        //      exception_port_ is now null.
        //   A: Releases the lock
        //
        //   B: Calls ResetExceptionPort; acquires the lock
        //   B: Sees a null exception_port_ and returns. But OnTargetUnbind()
        //      hasn't yet been called for the port.
        //
        // So, call it before releasing the lock
        eport->OnTargetUnbind();
    }

    if (!quietly)
        OnExceptionPortRemoval(eport);
    return true;
}

fbl::RefPtr<ExceptionPort> ThreadDispatcher::exception_port() {
    canary_.Assert();

    Guard<fbl::Mutex> guard{get_lock()};
    return exception_port_;
}

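// Deliver an exception |report| to |eport| and block this thread until the
// handler responds or the thread is killed. On success, *out_estatus holds
// the handler's disposition.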
zx_status_t ThreadDispatcher::ExceptionHandlerExchange(
    fbl::RefPtr<ExceptionPort> eport,
    const zx_exception_report_t* report,
    const arch_exception_context_t* arch_context,
    ExceptionStatus* out_estatus) {
    canary_.Assert();

    LTRACE_ENTRY_OBJ;

    // Note: As far as userspace is concerned there is currently no state change here that we
    // would notify state-tracker observers of.
    //
    // Send the message, then wait for the reply. Note that there is a "race" we need to handle:
    // we must send the exception report before going to sleep, but what if the receiver of the
    // report gets it and processes it before we are asleep? This is handled by holding the
    // dispatcher lock in places where the handler can see/modify thread state.

    {
        Guard<fbl::Mutex> guard{get_lock()};

        // Mark that we're in an exception.
        thread_.exception_context = arch_context;

        // For GetExceptionReport.
        exception_report_ = report;

        // For OnExceptionPortRemoval in case the port is unbound.
        DEBUG_ASSERT(exception_wait_port_ == nullptr);
        exception_wait_port_ = eport;

        exception_status_ = ExceptionStatus::UNPROCESSED;
    }

    zx_status_t status;

    {
        AutoBlocked by(Blocked::EXCEPTION);

        // There's no need to send the message under the lock, but we do need to make sure our
        // exception state and blocked state are up to date before sending the message. Otherwise, a
        // debugger could get the packet and observe them before we've updated them. Thus, send the
        // packet after updating both exception state and blocked state.
        status = eport->SendPacket(this, report->header.type);
        if (status != ZX_OK) {
            // Can't send the request to the exception handler. Report the error, which will
            // probably kill the process.
            LTRACEF("SendPacket returned %d\n", status);
            return status;
        }

        // Continue to wait for the exception response if we get suspended.
        // If it is suspended, the suspension will be processed after the
        // exception response is received (requiring a second resume).
        // Exceptions and suspensions are essentially treated orthogonally.

        do {
            status = event_wait_with_mask(&exception_event_, THREAD_SIGNAL_SUSPEND);
        } while (status == ZX_ERR_INTERNAL_INTR_RETRY);
    }

    Guard<fbl::Mutex> guard{get_lock()};

    // Note: If |status| != ZX_OK, then |exception_status_| is still
    // ExceptionStatus::UNPROCESSED.
    switch (status) {
    case ZX_OK:
        // It's critical that at this point the event no longer be armed.
        // Otherwise the next time we get an exception we'll fall right through
        // without waiting for an exception response.
        // Note: The event could be signaled after the event wait returns
        // if the thread was killed while the event was signaled.
        DEBUG_ASSERT(!event_signaled(&exception_event_));
        DEBUG_ASSERT(exception_status_ != ExceptionStatus::IDLE &&
                     exception_status_ != ExceptionStatus::UNPROCESSED);
        break;
    case ZX_ERR_INTERNAL_INTR_KILLED:
        // Thread was killed.
        break;
    default:
        ASSERT_MSG(false, "unexpected exception result: %d\n", status);
        __UNREACHABLE;
    }

    exception_wait_port_.reset();
    exception_report_ = nullptr;
    thread_.exception_context = nullptr;

    *out_estatus = exception_status_;
    exception_status_ = ExceptionStatus::IDLE;

    LTRACEF("returning status %d, estatus %d\n",
            status, static_cast<int>(*out_estatus));
    return status;
}

// TODO(brettw) ZX-1072 Remove this when all callers are updated to use
// the exception port variant below.
zx_status_t ThreadDispatcher::MarkExceptionHandled(ExceptionStatus estatus) {
    canary_.Assert();

    LTRACEF("obj %p, estatus %d\n", this, static_cast<int>(estatus));
    DEBUG_ASSERT(estatus != ExceptionStatus::IDLE &&
                 estatus != ExceptionStatus::UNPROCESSED);

    Guard<fbl::Mutex> guard{get_lock()};
    if (!InExceptionLocked())
        return ZX_ERR_BAD_STATE;

    // The thread can be in several states at this point. Alas this is a bit
    // complicated because there is a window in the middle of
    // ExceptionHandlerExchange between the thread going to sleep and after
    // the thread waking up where we can obtain the lock. Things are further
    // complicated by the fact that OnExceptionPortRemoval could get there
    // first, or we might get called a second time for the same exception.
    // It's critical that we don't re-arm the event after the thread wakes up.
    // To keep things simple we take a first-one-wins approach.
    DEBUG_ASSERT(exception_status_ != ExceptionStatus::IDLE);
    if (exception_status_ != ExceptionStatus::UNPROCESSED)
        return ZX_ERR_BAD_STATE;

    exception_status_ = estatus;
    event_signal(&exception_event_, true);
    return ZX_OK;
}

zx_status_t ThreadDispatcher::MarkExceptionHandled(PortDispatcher* eport,
                                                   ExceptionStatus estatus) {
    canary_.Assert();

    LTRACEF("obj %p, estatus %d\n", this, static_cast<int>(estatus));
    DEBUG_ASSERT(estatus != ExceptionStatus::IDLE &&
                 estatus != ExceptionStatus::UNPROCESSED);

    Guard<fbl::Mutex> guard{get_lock()};
    if (!InExceptionLocked())
        return ZX_ERR_BAD_STATE;

    // The exception port isn't used directly but is instead proof that the caller has permission to
    // resume from the exception. So validate that it corresponds to the task being resumed.
    if (!exception_wait_port_->PortMatches(eport, false))
        return ZX_ERR_ACCESS_DENIED;

    // The thread can be in several states at this point. Alas this is a bit complicated because
    // there is a window in the middle of ExceptionHandlerExchange between the thread going to sleep
    // and after the thread waking up where we can obtain the lock. Things are further complicated
    // by the fact that OnExceptionPortRemoval could get there first, or we might get called a
    // second time for the same exception. It's critical that we don't re-arm the event after the
    // thread wakes up. To keep things simple we take a first-one-wins approach.
    DEBUG_ASSERT(exception_status_ != ExceptionStatus::IDLE);
    if (exception_status_ != ExceptionStatus::UNPROCESSED)
        return ZX_ERR_BAD_STATE;

    exception_status_ = estatus;
    event_signal(&exception_event_, true);
    return ZX_OK;
}

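// Called when |eport| is being unbound while this thread may be waiting on it.
// Wakes the thread with a TRY_NEXT disposition unless the exception has
// already been handled.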
void ThreadDispatcher::OnExceptionPortRemoval(const fbl::RefPtr<ExceptionPort>& eport) {
    canary_.Assert();

    LTRACE_ENTRY_OBJ;
    Guard<fbl::Mutex> guard{get_lock()};
    if (!InExceptionLocked())
        return;
    DEBUG_ASSERT(exception_status_ != ExceptionStatus::IDLE);
    if (exception_wait_port_ == eport) {
        // Leave things alone if already processed. See MarkExceptionHandled.
        if (exception_status_ == ExceptionStatus::UNPROCESSED) {
            exception_status_ = ExceptionStatus::TRY_NEXT;
            event_signal(&exception_event_, true);
        }
    }
}

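// Returns true if the underlying thread is currently stopped in an exception.
// The dispatcher lock must be held.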
bool ThreadDispatcher::InExceptionLocked() {
    canary_.Assert();

    LTRACE_ENTRY_OBJ;
    DEBUG_ASSERT(get_lock()->lock().IsHeld());
    return thread_stopped_in_exception(&thread_);
}

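// Fill out a zx_info_thread_t for userspace, mapping the internal run state,
// blocked reason, and waiting exception port onto the ZX_THREAD_STATE_* and
// ZX_EXCEPTION_PORT_TYPE_* values.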
zx_status_t ThreadDispatcher::GetInfoForUserspace(zx_info_thread_t* info) {
    canary_.Assert();

    LTRACE_ENTRY_OBJ;

    *info = {};

    State state;
    Blocked blocked_reason;
    ExceptionPort::Type excp_port_type;
    // We need to fetch all these values under lock, but once we have them
    // we no longer need the lock.
    {
        Guard<fbl::Mutex> guard{get_lock()};
        state = state_;
        blocked_reason = blocked_reason_;
        if (InExceptionLocked() &&
            // A port type of !NONE here indicates to the caller that the
            // thread is waiting for an exception response. So don't return
            // !NONE if the thread just woke up but hasn't reacquired the
            // dispatcher lock.
            exception_status_ == ExceptionStatus::UNPROCESSED) {
            DEBUG_ASSERT(exception_wait_port_ != nullptr);
            excp_port_type = exception_wait_port_->type();
        } else {
            // Either we're not in an exception, or we're in the window where
            // the event wait has woken up but the dispatcher lock has
            // not been reacquired.
            DEBUG_ASSERT(exception_wait_port_ == nullptr ||
                         exception_status_ != ExceptionStatus::UNPROCESSED);
            excp_port_type = ExceptionPort::Type::NONE;
        }
    }

    switch (state) {
    case ThreadDispatcher::State::INITIAL:
    case ThreadDispatcher::State::INITIALIZED:
        info->state = ZX_THREAD_STATE_NEW;
        break;
    case ThreadDispatcher::State::RUNNING:
        // The thread may be "running" but be blocked in a syscall or
        // exception handler.
        switch (blocked_reason) {
        case Blocked::NONE:
            info->state = ZX_THREAD_STATE_RUNNING;
            break;
        case Blocked::EXCEPTION:
            info->state = ZX_THREAD_STATE_BLOCKED_EXCEPTION;
            break;
        case Blocked::SLEEPING:
            info->state = ZX_THREAD_STATE_BLOCKED_SLEEPING;
            break;
        case Blocked::FUTEX:
            info->state = ZX_THREAD_STATE_BLOCKED_FUTEX;
            break;
        case Blocked::PORT:
            info->state = ZX_THREAD_STATE_BLOCKED_PORT;
            break;
        case Blocked::CHANNEL:
            info->state = ZX_THREAD_STATE_BLOCKED_CHANNEL;
            break;
        case Blocked::WAIT_ONE:
            info->state = ZX_THREAD_STATE_BLOCKED_WAIT_ONE;
            break;
        case Blocked::WAIT_MANY:
            info->state = ZX_THREAD_STATE_BLOCKED_WAIT_MANY;
            break;
        case Blocked::INTERRUPT:
            info->state = ZX_THREAD_STATE_BLOCKED_INTERRUPT;
            break;
        default:
            DEBUG_ASSERT_MSG(false, "unexpected blocked reason: %d",
                             static_cast<int>(blocked_reason));
            break;
        }
        break;
    case ThreadDispatcher::State::SUSPENDED:
        info->state = ZX_THREAD_STATE_SUSPENDED;
        break;
    case ThreadDispatcher::State::DYING:
        info->state = ZX_THREAD_STATE_DYING;
        break;
    case ThreadDispatcher::State::DEAD:
        info->state = ZX_THREAD_STATE_DEAD;
        break;
    default:
        DEBUG_ASSERT_MSG(false, "unexpected run state: %d",
                         static_cast<int>(state));
        break;
    }

    switch (excp_port_type) {
    case ExceptionPort::Type::NONE:
        info->wait_exception_port_type = ZX_EXCEPTION_PORT_TYPE_NONE;
        break;
    case ExceptionPort::Type::DEBUGGER:
        info->wait_exception_port_type = ZX_EXCEPTION_PORT_TYPE_DEBUGGER;
        break;
    case ExceptionPort::Type::JOB_DEBUGGER:
        info->wait_exception_port_type = ZX_EXCEPTION_PORT_TYPE_JOB_DEBUGGER;
        break;
    case ExceptionPort::Type::THREAD:
        info->wait_exception_port_type = ZX_EXCEPTION_PORT_TYPE_THREAD;
        break;
    case ExceptionPort::Type::PROCESS:
        info->wait_exception_port_type = ZX_EXCEPTION_PORT_TYPE_PROCESS;
        break;
    case ExceptionPort::Type::JOB:
        info->wait_exception_port_type = ZX_EXCEPTION_PORT_TYPE_JOB;
        break;
    default:
        DEBUG_ASSERT_MSG(false, "unexpected exception port type: %d",
                         static_cast<int>(excp_port_type));
        break;
    }

    return ZX_OK;
}

zx_status_t ThreadDispatcher::GetStatsForUserspace(zx_info_thread_stats_t* info) {
    canary_.Assert();

    LTRACE_ENTRY_OBJ;

    *info = {};

    info->total_runtime = runtime_ns();
    return ZX_OK;
}

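// Copy out the report for the exception this thread is currently stopped in,
// or fail if the thread is not in an exception.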
zx_status_t ThreadDispatcher::GetExceptionReport(zx_exception_report_t* report) {
    canary_.Assert();

    LTRACE_ENTRY_OBJ;
    Guard<fbl::Mutex> guard{get_lock()};
    if (!InExceptionLocked())
        return ZX_ERR_BAD_STATE;
    DEBUG_ASSERT(exception_report_ != nullptr);
    *report = *exception_report_;
    return ZX_OK;
}

// Note: buffer must be sufficiently aligned

zx_status_t ThreadDispatcher::ReadState(zx_thread_state_topic_t state_kind,
                                        void* buffer, size_t buffer_len) {
    canary_.Assert();

    LTRACE_ENTRY_OBJ;

    // We can't be reading regs while the thread transitions from
    // SUSPENDED to RUNNING.
    Guard<fbl::Mutex> guard{get_lock()};

    if (state_ != State::SUSPENDED && !InExceptionLocked())
        return ZX_ERR_BAD_STATE;

    switch (state_kind) {
    case ZX_THREAD_STATE_GENERAL_REGS: {
        if (buffer_len != sizeof(zx_thread_state_general_regs_t))
            return ZX_ERR_INVALID_ARGS;
        return arch_get_general_regs(
            &thread_, static_cast<zx_thread_state_general_regs_t*>(buffer));
    }
    case ZX_THREAD_STATE_FP_REGS: {
        if (buffer_len != sizeof(zx_thread_state_fp_regs_t))
            return ZX_ERR_INVALID_ARGS;
        return arch_get_fp_regs(
            &thread_, static_cast<zx_thread_state_fp_regs_t*>(buffer));
    }
    case ZX_THREAD_STATE_VECTOR_REGS: {
        if (buffer_len != sizeof(zx_thread_state_vector_regs_t))
            return ZX_ERR_INVALID_ARGS;
        return arch_get_vector_regs(
            &thread_, static_cast<zx_thread_state_vector_regs_t*>(buffer));
    }
    case ZX_THREAD_STATE_SINGLE_STEP: {
        if (buffer_len != sizeof(zx_thread_state_single_step_t))
            return ZX_ERR_INVALID_ARGS;
        bool single_step;
        zx_status_t status = arch_get_single_step(&thread_, &single_step);
        if (status != ZX_OK)
            return status;
        *static_cast<zx_thread_state_single_step_t*>(buffer) =
            static_cast<zx_thread_state_single_step_t>(single_step);
        return ZX_OK;
    }
    default:
        return ZX_ERR_INVALID_ARGS;
    }
}

// Note: buffer must be sufficiently aligned

zx_status_t ThreadDispatcher::WriteState(zx_thread_state_topic_t state_kind,
                                         const void* buffer, size_t buffer_len) {
    canary_.Assert();

    LTRACE_ENTRY_OBJ;

    // We can't be writing regs while the thread transitions from
    // SUSPENDED to RUNNING.
    Guard<fbl::Mutex> guard{get_lock()};

    if (state_ != State::SUSPENDED && !InExceptionLocked())
        return ZX_ERR_BAD_STATE;

    switch (state_kind) {
    case ZX_THREAD_STATE_GENERAL_REGS: {
        if (buffer_len != sizeof(zx_thread_state_general_regs_t))
            return ZX_ERR_INVALID_ARGS;
        return arch_set_general_regs(&thread_,
                                     static_cast<const zx_thread_state_general_regs_t*>(buffer));
    }
    case ZX_THREAD_STATE_FP_REGS: {
        if (buffer_len != sizeof(zx_thread_state_fp_regs_t))
            return ZX_ERR_INVALID_ARGS;
        return arch_set_fp_regs(&thread_,
                                static_cast<const zx_thread_state_fp_regs_t*>(buffer));
    }
    case ZX_THREAD_STATE_VECTOR_REGS: {
        if (buffer_len != sizeof(zx_thread_state_vector_regs_t))
            return ZX_ERR_INVALID_ARGS;
        return arch_set_vector_regs(&thread_,
                                    static_cast<const zx_thread_state_vector_regs_t*>(buffer));
    }
    case ZX_THREAD_STATE_SINGLE_STEP: {
        if (buffer_len != sizeof(zx_thread_state_single_step_t))
            return ZX_ERR_INVALID_ARGS;
        const zx_thread_state_single_step_t* single_step =
            static_cast<const zx_thread_state_single_step_t*>(buffer);
        if (*single_step != 0 && *single_step != 1)
            return ZX_ERR_INVALID_ARGS;
        return arch_set_single_step(&thread_, !!*single_step);
    }
    default:
        return ZX_ERR_INVALID_ARGS;
    }
}

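// Apply a priority (already validated by the profile dispatcher) to the
// underlying kernel thread. Rejected before the thread is initialized and
// once it is dying or dead.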
zx_status_t ThreadDispatcher::SetPriority(int32_t priority) {
    Guard<fbl::Mutex> guard{get_lock()};
    if ((state_ == State::INITIAL) ||
        (state_ == State::DYING) ||
        (state_ == State::DEAD)) {
        return ZX_ERR_BAD_STATE;
    }
    // The priority was already validated by the Profile dispatcher.
    thread_set_priority(&thread_, priority);
    return ZX_OK;
}

void get_user_thread_process_name(const void* user_thread,
                                  char out_name[ZX_MAX_NAME_LEN]) {
    const ThreadDispatcher* ut =
        reinterpret_cast<const ThreadDispatcher*>(user_thread);
    ut->process()->get_name(out_name);
}

const char* StateToString(ThreadDispatcher::State state) {
    switch (state) {
    case ThreadDispatcher::State::INITIAL:
        return "initial";
    case ThreadDispatcher::State::INITIALIZED:
        return "initialized";
    case ThreadDispatcher::State::RUNNING:
        return "running";
    case ThreadDispatcher::State::SUSPENDED:
        return "suspended";
    case ThreadDispatcher::State::DYING:
        return "dying";
    case ThreadDispatcher::State::DEAD:
        return "dead";
    }
    return "unknown";
}

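// The "related" koid of a thread is the koid of its owning process.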
zx_koid_t ThreadDispatcher::get_related_koid() const {
    canary_.Assert();

    return process_->get_koid();
}