// Copyright 2016 The Fuchsia Authors
//
// Use of this source code is governed by a MIT-style
// license that can be found in the LICENSE file or at
// https://opensource.org/licenses/MIT

#include <object/process_dispatcher.h>

#include <assert.h>
#include <inttypes.h>
#include <list.h>
#include <rand.h>
#include <string.h>
#include <trace.h>

#include <arch/defines.h>

#include <kernel/thread.h>
#include <vm/vm.h>
#include <vm/vm_aspace.h>
#include <vm/vm_object.h>

#include <lib/crypto/global_prng.h>
#include <lib/ktrace.h>

#include <zircon/rights.h>

#include <object/diagnostics.h>
#include <object/futex_context.h>
#include <object/handle.h>
#include <object/job_dispatcher.h>
#include <object/thread_dispatcher.h>
#include <object/vm_address_region_dispatcher.h>
#include <object/vm_object_dispatcher.h>

#include <fbl/alloc_checker.h>
#include <fbl/auto_lock.h>

#define LOCAL_TRACE 0
static zx_handle_t map_handle_to_value(const Handle* handle, uint32_t mixer) {
    // Ensure that the last bit of the result is not zero, and make sure
    // we don't lose any base_value bits or make the result negative
    // when shifting.
    DEBUG_ASSERT((mixer & ((1<<31) | 0x1)) == 0);
    DEBUG_ASSERT((handle->base_value() & 0xc0000000) == 0);

    auto handle_id = (handle->base_value() << 1) | 0x1;
    return static_cast<zx_handle_t>(mixer ^ handle_id);
}

static Handle* map_value_to_handle(zx_handle_t value, uint32_t mixer) {
    auto handle_id = (static_cast<uint32_t>(value) ^ mixer) >> 1;
    return Handle::FromU32(handle_id);
}

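// Creates a new ProcessDispatcher and the dispatcher for its root VMAR. The
// process is registered with |job| and given a fresh address space by
// Initialize(); on success the process dispatcher, its default rights, and
// the root-VMAR dispatcher with its rights are returned via the out
// parameters.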
zx_status_t ProcessDispatcher::Create(
    fbl::RefPtr<JobDispatcher> job, fbl::StringPiece name, uint32_t flags,
    fbl::RefPtr<Dispatcher>* dispatcher, zx_rights_t* rights,
    fbl::RefPtr<VmAddressRegionDispatcher>* root_vmar_disp,
    zx_rights_t* root_vmar_rights) {
    fbl::AllocChecker ac;
    fbl::RefPtr<ProcessDispatcher> process =
        fbl::AdoptRef(new (&ac) ProcessDispatcher(job, name, flags));
    if (!ac.check())
        return ZX_ERR_NO_MEMORY;

    if (!job->AddChildProcess(process))
        return ZX_ERR_BAD_STATE;

    zx_status_t result = process->Initialize();
    if (result != ZX_OK)
        return result;

    fbl::RefPtr<VmAddressRegion> vmar(process->aspace()->RootVmar());

    // Create a dispatcher for the root VMAR.
    fbl::RefPtr<Dispatcher> new_vmar_dispatcher;
    result = VmAddressRegionDispatcher::Create(vmar, ARCH_MMU_FLAG_PERM_USER,
                                               &new_vmar_dispatcher,
                                               root_vmar_rights);
    if (result != ZX_OK) {
        process->aspace_->Destroy();
        return result;
    }

    *rights = ZX_DEFAULT_PROCESS_RIGHTS;
    *dispatcher = fbl::move(process);
    *root_vmar_disp = DownCastDispatcher<VmAddressRegionDispatcher>(
            &new_vmar_dispatcher);

    return ZX_OK;
}

ProcessDispatcher::ProcessDispatcher(fbl::RefPtr<JobDispatcher> job,
                                     fbl::StringPiece name,
                                     uint32_t flags)
  : job_(fbl::move(job)), policy_(job_->GetPolicy()),
    name_(name.data(), name.length()) {
    LTRACE_ENTRY_OBJ;

    // Generate the handle XOR mask with the top bit and bottom two bits cleared.
    uint32_t secret;
    auto prng = crypto::GlobalPRNG::GetInstance();
    prng->Draw(&secret, sizeof(secret));

    // Handle values cannot be negative, so we mask off the high bit.
    handle_rand_ = (secret << 2) & INT_MAX;
}

ProcessDispatcher::~ProcessDispatcher() {
    LTRACE_ENTRY_OBJ;

    DEBUG_ASSERT(state_ == State::INITIAL || state_ == State::DEAD);

    // Assert that the -> DEAD transition cleaned up what it should have.
    DEBUG_ASSERT(handles_.is_empty());
    DEBUG_ASSERT(exception_port_ == nullptr);
    DEBUG_ASSERT(debugger_exception_port_ == nullptr);

    // Remove ourselves from the parent job's raw ref to us. Note that this
    // might already have been done when transitioning to State::DEAD; the
    // job can handle double calls.
    job_->RemoveChildProcess(this);

    LTRACE_EXIT_OBJ;
}

void ProcessDispatcher::on_zero_handles() {
    // If the process is in the initial state and the last handle is closed
    // we never detach from the parent job, so run the shutdown sequence for
    // that case.
    {
        Guard<fbl::Mutex> guard{get_lock()};
        if (state_ != State::INITIAL) {
            // Use the normal cleanup path instead.
            return;
        }
        SetStateLocked(State::DEAD);
    }

    FinishDeadTransition();
}

void ProcessDispatcher::get_name(char out_name[ZX_MAX_NAME_LEN]) const {
    name_.get(ZX_MAX_NAME_LEN, out_name);
}

zx_status_t ProcessDispatcher::set_name(const char* name, size_t len) {
    return name_.set(name, len);
}

zx_status_t ProcessDispatcher::Initialize() {
    LTRACE_ENTRY_OBJ;

    Guard<fbl::Mutex> guard{get_lock()};

    DEBUG_ASSERT(state_ == State::INITIAL);

    // create an address space for this process, named after the process's koid.
    char aspace_name[ZX_MAX_NAME_LEN];
    snprintf(aspace_name, sizeof(aspace_name), "proc:%" PRIu64, get_koid());
    aspace_ = VmAspace::Create(VmAspace::TYPE_USER, aspace_name);
    if (!aspace_) {
        TRACEF("error creating address space\n");
        return ZX_ERR_NO_MEMORY;
    }

    return ZX_OK;
}

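// Called by the current process to terminate itself: records |retcode| unless
// an exit is already in progress, moves the process to DYING (which kills all
// of its threads), and then exits the calling thread. Never returns.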
void ProcessDispatcher::Exit(int64_t retcode) {
    LTRACE_ENTRY_OBJ;

    DEBUG_ASSERT(ProcessDispatcher::GetCurrent() == this);

    {
        Guard<fbl::Mutex> guard{get_lock()};

        // check that we're in the RUNNING state or we're racing with something
        // else that has already pushed us into the DYING state
        DEBUG_ASSERT_MSG(state_ == State::RUNNING || state_ == State::DYING,
                "state is %s", StateToString(state_));

        // Set the exit status if there isn't already an exit in progress.
        if (state_ != State::DYING) {
            DEBUG_ASSERT(retcode_ == 0);
            retcode_ = retcode;
        }

        // enter the dying state, which should kill all threads
        SetStateLocked(State::DYING);
    }

    ThreadDispatcher::GetCurrent()->Exit();

    __UNREACHABLE;
}

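// Unlike Exit(), Kill() may be called from outside the process. It forces a
// -1 return code if no exit is already in progress and moves the process to
// DYING, or straight to DEAD if the process has no threads.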
void ProcessDispatcher::Kill() {
    LTRACE_ENTRY_OBJ;

    // ZX-880: Call RemoveChildProcess outside of |get_lock()|.
    bool became_dead = false;

    {
        Guard<fbl::Mutex> guard{get_lock()};

        // we're already dead
        if (state_ == State::DEAD)
            return;

        if (state_ != State::DYING) {
            // If there isn't an Exit already in progress, set a nonzero exit
            // status so e.g. crashing tests don't appear to have succeeded.
            DEBUG_ASSERT(retcode_ == 0);
            retcode_ = -1;
        }

        // if we have no threads, enter the dead state directly
        if (thread_list_.is_empty()) {
            SetStateLocked(State::DEAD);
            became_dead = true;
        } else {
            // enter the dying state, which should trigger a thread kill.
            // the last thread exiting will transition us to DEAD
            SetStateLocked(State::DYING);
        }
    }

    if (became_dead)
        FinishDeadTransition();
}

void ProcessDispatcher::KillAllThreadsLocked() {
    LTRACE_ENTRY_OBJ;

    for (auto& thread : thread_list_) {
        LTRACEF("killing thread %p\n", &thread);
        thread.Kill();
    }
}

zx_status_t ProcessDispatcher::AddThread(ThreadDispatcher* t, bool initial_thread) {
    LTRACE_ENTRY_OBJ;

    Guard<fbl::Mutex> guard{get_lock()};

    if (initial_thread) {
        if (state_ != State::INITIAL)
            return ZX_ERR_BAD_STATE;
    } else {
        // We must not add a thread when in the DYING or DEAD states.
        // Also, we want to ensure that this is not the first thread.
        if (state_ != State::RUNNING)
            return ZX_ERR_BAD_STATE;
    }

    // add the thread to our list
    DEBUG_ASSERT(thread_list_.is_empty() == initial_thread);
    thread_list_.push_back(t);

    DEBUG_ASSERT(t->process() == this);

    if (initial_thread)
        SetStateLocked(State::RUNNING);

    return ZX_OK;
}

// This is called within thread T's context when it is exiting.
void ProcessDispatcher::RemoveThread(ThreadDispatcher* t) {
    LTRACE_ENTRY_OBJ;

    // ZX-880: Call RemoveChildProcess outside of |get_lock()|.
    bool became_dead = false;

    {
        // we're going to check for state and possibly transition below
        Guard<fbl::Mutex> guard{get_lock()};

        // remove the thread from our list
        DEBUG_ASSERT(t != nullptr);
        thread_list_.erase(*t);

        // if this was the last thread, transition directly to DEAD state
        if (thread_list_.is_empty()) {
            LTRACEF("last thread left the process %p, entering DEAD state\n", this);
            SetStateLocked(State::DEAD);
            became_dead = true;
        }
    }

    if (became_dead)
        FinishDeadTransition();
}

zx_koid_t ProcessDispatcher::get_related_koid() const {
    return job_->get_koid();
}

ProcessDispatcher::State ProcessDispatcher::state() const {
    Guard<fbl::Mutex> guard{get_lock()};
    return state_;
}

fbl::RefPtr<JobDispatcher> ProcessDispatcher::job() {
    return job_;
}

void ProcessDispatcher::SetStateLocked(State s) {
    LTRACEF("process %p: state %u (%s)\n", this, static_cast<unsigned int>(s), StateToString(s));

    DEBUG_ASSERT(get_lock()->lock().IsHeld());

    // look for some invalid state transitions
    if (state_ == State::DEAD && s != State::DEAD) {
        panic("ProcessDispatcher::SetStateLocked invalid state transition from DEAD to !DEAD\n");
        return;
    }

    // transitions to your own state are okay
    if (s == state_)
        return;

    state_ = s;

    if (s == State::DYING) {
        // send kill to all of our threads
        KillAllThreadsLocked();
    }
}

// Finish processing of the transition to State::DEAD. Some things need to be done
// outside of holding |get_lock()|. Beware this is called from several places
// including on_zero_handles().
void ProcessDispatcher::FinishDeadTransition() {
    DEBUG_ASSERT(!completely_dead_);
    completely_dead_ = true;

    // clean up the handle table
    LTRACEF_LEVEL(2, "cleaning up handle table on proc %p\n", this);

    fbl::DoublyLinkedList<Handle*> to_clean;
    {
        Guard<fbl::Mutex> guard{&handle_table_lock_};
        for (auto& handle : handles_) {
            handle.set_process_id(0u);
        }
        to_clean.swap(handles_);
    }

    // ZX-1544: Here is where if we're the last holder of a handle of one of
    // our exception ports then ResetExceptionPort will get called (by
    // ExceptionPort::OnPortZeroHandles) and will need to grab |get_lock()|.
    // This needs to be done outside of |get_lock()|.
    while (!to_clean.is_empty()) {
        // Delete handle via HandleOwner dtor.
        HandleOwner ho(to_clean.pop_front());
    }

    LTRACEF_LEVEL(2, "done cleaning up handle table on proc %p\n", this);

    // tear down the address space
    aspace_->Destroy();

    // signal waiter
    LTRACEF_LEVEL(2, "signaling waiters\n");
    UpdateState(0u, ZX_TASK_TERMINATED);

    // The PROC_CREATE record currently emits a uint32_t koid.
    uint32_t koid = static_cast<uint32_t>(get_koid());
    ktrace(TAG_PROC_EXIT, koid, 0, 0, 0);

    // Call job_->RemoveChildProcess(this) outside of |get_lock()|. Otherwise
    // we risk a deadlock as we have |get_lock()| and RemoveChildProcess grabs
    // the job's |lock_|, whereas JobDispatcher::EnumerateChildren obtains the
    // locks in the opposite order. We want to keep lock acquisition order
    // consistent, and JobDispatcher::EnumerateChildren's order makes
    // sense. We don't need |get_lock()| when calling RemoveChildProcess
    // here. ZX-880
    // RemoveChildProcess is called soon after releasing |get_lock()| so that
    // the semantics of signaling ZX_JOB_NO_PROCESSES match that of
    // ZX_TASK_TERMINATED.
    job_->RemoveChildProcess(this);
}

// process handle manipulation routines
zx_handle_t ProcessDispatcher::MapHandleToValue(const Handle* handle) const {
    return map_handle_to_value(handle, handle_rand_);
}

zx_handle_t ProcessDispatcher::MapHandleToValue(const HandleOwner& handle) const {
    return map_handle_to_value(handle.get(), handle_rand_);
}

Handle* ProcessDispatcher::GetHandleLocked(zx_handle_t handle_value,
                                           bool skip_policy) {
    auto handle = map_value_to_handle(handle_value, handle_rand_);
    if (handle && handle->process_id() == get_koid())
        return handle;

    // Handle lookup failed.  We potentially generate an exception,
    // depending on the job policy.  Note that we don't use the return
    // value from QueryPolicy() here: ZX_POL_ACTION_ALLOW and
    // ZX_POL_ACTION_DENY are equivalent for ZX_POL_BAD_HANDLE.
    if (likely(!skip_policy))
        QueryPolicy(ZX_POL_BAD_HANDLE);
    return nullptr;
}

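// Handles owned by this process are tagged with our koid via set_process_id(),
// which is what lets GetHandleLocked() reject handle values that decode to a
// Handle owned by some other process.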
void ProcessDispatcher::AddHandle(HandleOwner handle) {
    Guard<fbl::Mutex> guard{&handle_table_lock_};
    AddHandleLocked(fbl::move(handle));
}

void ProcessDispatcher::AddHandleLocked(HandleOwner handle) {
    handle->set_process_id(get_koid());
    handles_.push_front(handle.release());
}

HandleOwner ProcessDispatcher::RemoveHandle(zx_handle_t handle_value) {
    Guard<fbl::Mutex> guard{&handle_table_lock_};
    return RemoveHandleLocked(handle_value);
}

HandleOwner ProcessDispatcher::RemoveHandleLocked(zx_handle_t handle_value) {
    auto handle = GetHandleLocked(handle_value);
    if (!handle)
        return nullptr;

    handle->set_process_id(0u);
    handles_.erase(*handle);

    return HandleOwner(handle);
}

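// Removes a batch of handle values in one call: entries equal to
// ZX_HANDLE_INVALID are skipped, invalid entries set ZX_ERR_BAD_HANDLE but do
// not stop processing, and the input is consumed in kMaxMessageHandles-sized
// chunks since |num_handles| is unbounded.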
zx_status_t ProcessDispatcher::RemoveHandles(user_in_ptr<const zx_handle_t> user_handles,
                                             size_t num_handles) {
    zx_status_t status = ZX_OK;
    size_t offset = 0;
    while (offset < num_handles) {
        // We process |num_handles| in chunks of |kMaxMessageHandles|
        // because we don't have a limit on how large |num_handles|
        // can be.
        auto chunk_size = fbl::min<size_t>(num_handles - offset, kMaxMessageHandles);

        zx_handle_t handles[kMaxMessageHandles];

        // If we fail |copy_array_from_user|, then we might discard some, but
        // not all, of the handles |user_handles| specified.
        if (user_handles.copy_array_from_user(handles, chunk_size, offset) != ZX_OK)
            return status;

        {
            Guard<fbl::Mutex> guard{handle_table_lock()};
            for (size_t ix = 0; ix != chunk_size; ++ix) {
                if (handles[ix] == ZX_HANDLE_INVALID)
                    continue;
                auto handle = RemoveHandleLocked(handles[ix]);
                if (!handle)
                    status = ZX_ERR_BAD_HANDLE;
            }
        }

        offset += chunk_size;
    }

    return status;
}

zx_koid_t ProcessDispatcher::GetKoidForHandle(zx_handle_t handle_value) {
    Guard<fbl::Mutex> guard{&handle_table_lock_};
    Handle* handle = GetHandleLocked(handle_value);
    if (!handle)
        return ZX_KOID_INVALID;
    return handle->dispatcher()->get_koid();
}

zx_status_t ProcessDispatcher::GetDispatcherInternal(zx_handle_t handle_value,
                                                     fbl::RefPtr<Dispatcher>* dispatcher,
                                                     zx_rights_t* rights) {
    Guard<fbl::Mutex> guard{&handle_table_lock_};
    Handle* handle = GetHandleLocked(handle_value);
    if (!handle)
        return ZX_ERR_BAD_HANDLE;

    *dispatcher = handle->dispatcher();
    if (rights)
        *rights = handle->rights();
    return ZX_OK;
}

zx_status_t ProcessDispatcher::GetDispatcherWithRightsInternal(zx_handle_t handle_value,
                                                               zx_rights_t desired_rights,
                                                               fbl::RefPtr<Dispatcher>* dispatcher_out,
                                                               zx_rights_t* out_rights) {
    Guard<fbl::Mutex> guard{&handle_table_lock_};
    Handle* handle = GetHandleLocked(handle_value);
    if (!handle)
        return ZX_ERR_BAD_HANDLE;

    if (!handle->HasRights(desired_rights))
        return ZX_ERR_ACCESS_DENIED;

    *dispatcher_out = handle->dispatcher();
    if (out_rights)
        *out_rights = handle->rights();
    return ZX_OK;
}

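// Fills out a zx_info_process_t. The state and return code are snapshotted
// together under the lock so that |return_code| is always consistent with the
// reported started/exited flags.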
zx_status_t ProcessDispatcher::GetInfo(zx_info_process_t* info) {
    memset(info, 0, sizeof(*info));

    State state;
    // retcode_ depends on the state: make sure they're consistent.
    {
        Guard<fbl::Mutex> guard{get_lock()};
        state = state_;
        info->return_code = retcode_;
        // TODO: Protect with rights if necessary.
        info->debugger_attached = debugger_exception_port_ != nullptr;
    }

    switch (state) {
    case State::DEAD:
    case State::DYING:
        info->exited = true;
        __FALLTHROUGH;
    case State::RUNNING:
        info->started = true;
        break;
    case State::INITIAL:
    default:
        break;
    }

    return ZX_OK;
}

zx_status_t ProcessDispatcher::GetStats(zx_info_task_stats_t* stats) {
    DEBUG_ASSERT(stats != nullptr);
    Guard<fbl::Mutex> guard{get_lock()};
    if (state_ != State::RUNNING) {
        return ZX_ERR_BAD_STATE;
    }
    VmAspace::vm_usage_t usage;
    zx_status_t s = aspace_->GetMemoryUsage(&usage);
    if (s != ZX_OK) {
        return s;
    }
    stats->mem_mapped_bytes = usage.mapped_pages * PAGE_SIZE;
    stats->mem_private_bytes = usage.private_pages * PAGE_SIZE;
    stats->mem_shared_bytes = usage.shared_pages * PAGE_SIZE;
    stats->mem_scaled_shared_bytes = usage.scaled_shared_bytes;
    return ZX_OK;
}

zx_status_t ProcessDispatcher::GetAspaceMaps(
    user_out_ptr<zx_info_maps_t> maps, size_t max,
    size_t* actual, size_t* available) {
    Guard<fbl::Mutex> guard{get_lock()};
    if (state_ != State::RUNNING) {
        return ZX_ERR_BAD_STATE;
    }
    return GetVmAspaceMaps(aspace_, maps, max, actual, available);
}

zx_status_t ProcessDispatcher::GetVmos(
    user_out_ptr<zx_info_vmo_t> vmos, size_t max,
    size_t* actual_out, size_t* available_out) {
    Guard<fbl::Mutex> guard{get_lock()};
    if (state_ != State::RUNNING) {
        return ZX_ERR_BAD_STATE;
    }
    size_t actual = 0;
    size_t available = 0;
    zx_status_t s = GetProcessVmosViaHandles(this, vmos, max, &actual, &available);
    if (s != ZX_OK) {
        return s;
    }
    size_t actual2 = 0;
    size_t available2 = 0;
    DEBUG_ASSERT(max >= actual);
    s = GetVmAspaceVmos(aspace_, vmos.element_offset(actual), max - actual,
                        &actual2, &available2);
    if (s != ZX_OK) {
        return s;
    }
    *actual_out = actual + actual2;
    *available_out = available + available2;
    return ZX_OK;
}

zx_status_t ProcessDispatcher::GetThreads(fbl::Array<zx_koid_t>* out_threads) {
    Guard<fbl::Mutex> guard{get_lock()};
    size_t n = thread_list_.size_slow();
    fbl::Array<zx_koid_t> threads;
    fbl::AllocChecker ac;
    threads.reset(new (&ac) zx_koid_t[n], n);
    if (!ac.check())
        return ZX_ERR_NO_MEMORY;
    size_t i = 0;
    for (auto& thread : thread_list_) {
        threads[i] = thread.get_koid();
        ++i;
    }
    DEBUG_ASSERT(i == n);
    *out_threads = fbl::move(threads);
    return ZX_OK;
}

zx_status_t ProcessDispatcher::SetExceptionPort(fbl::RefPtr<ExceptionPort> eport) {
    LTRACE_ENTRY_OBJ;
    bool debugger = false;
    switch (eport->type()) {
    case ExceptionPort::Type::DEBUGGER:
        debugger = true;
        break;
    case ExceptionPort::Type::PROCESS:
        break;
    default:
        DEBUG_ASSERT_MSG(false, "unexpected port type: %d",
                         static_cast<int>(eport->type()));
        break;
    }

    // Lock |get_lock()| to ensure the process doesn't transition to dead
    // while we're setting the exception handler.
    Guard<fbl::Mutex> guard{get_lock()};
    if (state_ == State::DEAD)
        return ZX_ERR_NOT_FOUND;
    if (debugger) {
        if (debugger_exception_port_)
            return ZX_ERR_ALREADY_BOUND;
        debugger_exception_port_ = eport;
    } else {
        if (exception_port_)
            return ZX_ERR_ALREADY_BOUND;
        exception_port_ = eport;
    }

    return ZX_OK;
}

bool ProcessDispatcher::ResetExceptionPort(bool debugger, bool quietly) {
    LTRACE_ENTRY_OBJ;
    fbl::RefPtr<ExceptionPort> eport;

    // Remove the exception handler first. As we resume threads we don't
    // want them to hit another exception and get back into
    // ExceptionHandlerExchange.
    {
        Guard<fbl::Mutex> guard{get_lock()};
        if (debugger) {
            debugger_exception_port_.swap(eport);
        } else {
            exception_port_.swap(eport);
        }
        if (eport == nullptr) {
            // Attempted to unbind when no exception port is bound.
            return false;
        }
        // This method must guarantee that no caller will return until
        // OnTargetUnbind has been called on the port-to-unbind.
        // This becomes important when a manual unbind races with a
        // PortDispatcher::on_zero_handles auto-unbind.
        //
        // If OnTargetUnbind were called outside of the lock, it would lead to
        // a race (for threads A and B):
        //
        //   A: Calls ResetExceptionPort; acquires the lock
        //   A: Sees a non-null exception_port_, swaps it into the eport local.
        //      exception_port_ is now null.
        //   A: Releases the lock
        //
        //   B: Calls ResetExceptionPort; acquires the lock
        //   B: Sees a null exception_port_ and returns. But OnTargetUnbind()
        //      hasn't yet been called for the port.
        //
        // So, call it before releasing the lock.
        eport->OnTargetUnbind();
    }

    if (!quietly) {
        OnExceptionPortRemoval(eport);
    }
    return true;
}

fbl::RefPtr<ExceptionPort> ProcessDispatcher::exception_port() {
    Guard<fbl::Mutex> guard{get_lock()};
    return exception_port_;
}

fbl::RefPtr<ExceptionPort> ProcessDispatcher::debugger_exception_port() {
    Guard<fbl::Mutex> guard{get_lock()};
    return debugger_exception_port_;
}

void ProcessDispatcher::OnExceptionPortRemoval(
        const fbl::RefPtr<ExceptionPort>& eport) {
    Guard<fbl::Mutex> guard{get_lock()};
    for (auto& thread : thread_list_) {
        thread.OnExceptionPortRemoval(eport);
    }
}

uint32_t ProcessDispatcher::ThreadCount() const {
    canary_.Assert();

    Guard<fbl::Mutex> guard{get_lock()};
    return static_cast<uint32_t>(thread_list_.size_slow());
}

size_t ProcessDispatcher::PageCount() const {
    canary_.Assert();

    Guard<fbl::Mutex> guard{get_lock()};
    if (state_ != State::RUNNING) {
        return 0;
    }
    return aspace_->AllocatedPages();
}

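// JobEnumerator that walks the job tree looking for a process with a matching
// koid; used by LookupProcessById() below.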
class FindProcessByKoid final : public JobEnumerator {
public:
    FindProcessByKoid(zx_koid_t koid) : koid_(koid) {}
    FindProcessByKoid(const FindProcessByKoid&) = delete;

    // To be called after enumeration.
    fbl::RefPtr<ProcessDispatcher> get_pd() { return pd_; }

private:
    bool OnProcess(ProcessDispatcher* process) final {
        if (process->get_koid() == koid_) {
            pd_ = fbl::WrapRefPtr(process);
            // Stop the enumeration.
            return false;
        }
        // Keep looking.
        return true;
    }

    const zx_koid_t koid_;
    fbl::RefPtr<ProcessDispatcher> pd_ = nullptr;
};

// static
fbl::RefPtr<ProcessDispatcher> ProcessDispatcher::LookupProcessById(zx_koid_t koid) {
    FindProcessByKoid finder(koid);
    GetRootJobDispatcher()->EnumerateChildren(&finder, /* recurse */ true);
    return finder.get_pd();
}

fbl::RefPtr<ThreadDispatcher> ProcessDispatcher::LookupThreadById(zx_koid_t koid) {
    LTRACE_ENTRY_OBJ;
    Guard<fbl::Mutex> guard{get_lock()};

    auto iter = thread_list_.find_if([koid](const ThreadDispatcher& t) { return t.get_koid() == koid; });
    return fbl::WrapRefPtr(iter.CopyPointer());
}

uintptr_t ProcessDispatcher::get_debug_addr() const {
    Guard<fbl::Mutex> guard{get_lock()};
    return debug_addr_;
}

zx_status_t ProcessDispatcher::set_debug_addr(uintptr_t addr) {
    if (addr == 0u)
        return ZX_ERR_INVALID_ARGS;
    Guard<fbl::Mutex> guard{get_lock()};
    // Only allow the address to be set while it is still zero or the magic
    // "break on set" value: once ld.so has set it, that's it.
    if (!(debug_addr_ == 0u || debug_addr_ == ZX_PROCESS_DEBUG_ADDR_BREAK_ON_SET))
        return ZX_ERR_ACCESS_DENIED;
    debug_addr_ = addr;
    return ZX_OK;
}

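// Checks the job policy for |condition|. If the policy action includes
// ZX_POL_ACTION_EXCEPTION, a policy exception is raised on the current
// thread; a DENY action is reported to the caller as ZX_ERR_ACCESS_DENIED.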
zx_status_t ProcessDispatcher::QueryPolicy(uint32_t condition) const {
    auto action = GetSystemPolicyManager()->QueryBasicPolicy(policy_, condition);
    if (action & ZX_POL_ACTION_EXCEPTION) {
        thread_signal_policy_exception();
    }
    // TODO(cpu): check for the ZX_POL_KILL bit and return an error code
    // that abigen understands as termination.
    return (action & ZX_POL_ACTION_DENY) ? ZX_ERR_ACCESS_DENIED : ZX_OK;
}

uintptr_t ProcessDispatcher::cache_vdso_code_address() {
    Guard<fbl::Mutex> guard{get_lock()};
    vdso_code_address_ = aspace_->vdso_code_address();
    return vdso_code_address_;
}

const char* StateToString(ProcessDispatcher::State state) {
    switch (state) {
    case ProcessDispatcher::State::INITIAL:
        return "initial";
    case ProcessDispatcher::State::RUNNING:
        return "running";
    case ProcessDispatcher::State::DYING:
        return "dying";
    case ProcessDispatcher::State::DEAD:
        return "dead";
    }
    return "unknown";
}

bool ProcessDispatcher::IsHandleValid(zx_handle_t handle_value) {
    Guard<fbl::Mutex> guard{&handle_table_lock_};
    return (GetHandleLocked(handle_value) != nullptr);
}

bool ProcessDispatcher::IsHandleValidNoPolicyCheck(zx_handle_t handle_value) {
    Guard<fbl::Mutex> guard{&handle_table_lock_};
    return (GetHandleLocked(handle_value, true) != nullptr);
}

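// Walks up the job tree starting at our parent job and notifies the closest
// job-level debugger exception port, if any, that thread |t|'s process has
// started.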
void ProcessDispatcher::OnProcessStartForJobDebugger(ThreadDispatcher* t) {
    auto job = job_;
    while (job) {
        auto port = job->debugger_exception_port();
        if (port) {
            port->OnProcessStartForDebugger(t);
            break;
        } else {
            job = job->parent();
        }
    }
}