// Copyright 2016 The Fuchsia Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include <runtime/thread.h>

#include <zircon/compiler.h>
#include <zircon/stack.h>
#include <zircon/syscalls.h>
#include <runtime/mutex.h>
#include <stdatomic.h>
#include <stddef.h>
#include <stdint.h>
// A zxr_thread_t starts its life JOINABLE.
// - If someone calls zxr_thread_join on it, it transitions to JOINED.
// - If someone calls zxr_thread_detach on it, it transitions to DETACHED.
// - When it begins exiting, the EXITING state is entered.
// - When it is no longer using its memory and handle resources, it transitions
//   to DONE.  If the thread was DETACHED prior to EXITING, this transition MAY
//   not happen.
// No other transitions occur.

enum {
    JOINABLE,
    DETACHED,
    JOINED,
    EXITING,
    DONE,
};
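
// Editorial illustration (not original to this file): a minimal sketch of the
// caller-side sequence that walks the state machine above, going
// JOINABLE -> JOINED -> EXITING -> DONE.  It assumes the caller already owns a
// suitable process handle and a mapped stack; the helper name and parameters
// are invented for documentation purposes only.
__attribute__((unused))
static zx_status_t example_create_start_join(zx_handle_t process,
                                             uintptr_t stack_base,
                                             size_t stack_size,
                                             zxr_thread_entry_t entry,
                                             void* arg) {
    zxr_thread_t thread;
    // detached == false, so the new thread starts out JOINABLE.
    zx_status_t status = zxr_thread_create(process, "example", false, &thread);
    if (status != ZX_OK)
        return status;
    status = zxr_thread_start(&thread, stack_base, stack_size, entry, arg);
    if (status != ZX_OK)
        return status;  // zxr_thread_start already destroyed the thread.
    // Claims the JOINED slot, then blocks until the thread reaches DONE.
    return zxr_thread_join(&thread);
}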

typedef struct {
    zxr_thread_entry_t entry;
    zx_handle_t handle;
    atomic_int state;
} zxr_internal_thread_t;

// zxr_thread_t should reserve enough size for our internal data.
_Static_assert(sizeof(zxr_thread_t) == sizeof(zxr_internal_thread_t),
               "Update zxr_thread_t size for this platform.");

static inline zxr_internal_thread_t* to_internal(zxr_thread_t* external) {
    return (zxr_internal_thread_t*)(external);
}

zx_status_t zxr_thread_destroy(zxr_thread_t* thread) {
    zx_handle_t handle = to_internal(thread)->handle;
    to_internal(thread)->handle = ZX_HANDLE_INVALID;
    return handle == ZX_HANDLE_INVALID ? ZX_OK : _zx_handle_close(handle);
}

// Put the thread into EXITING state.  Returns the previous state.
static int begin_exit(zxr_internal_thread_t* thread) {
    return atomic_exchange_explicit(&thread->state, EXITING, memory_order_release);
}

// Claim the thread as JOINED or DETACHED.  Returns true on success, which only
// happens if the previous state was JOINABLE.  Always stores the previous
// state in *old_state.
static bool claim_thread(zxr_internal_thread_t* thread, int new_state,
                         int* old_state) {
    *old_state = JOINABLE;
    return atomic_compare_exchange_strong_explicit(
            &thread->state, old_state, new_state,
            memory_order_acq_rel, memory_order_acquire);
}

// Extract the handle from the thread structure.  This must only be called by
// the thread itself (i.e., it is not thread-safe).
static zx_handle_t take_handle(zxr_internal_thread_t* thread) {
    zx_handle_t tmp = thread->handle;
    thread->handle = ZX_HANDLE_INVALID;
    return tmp;
}

static _Noreturn void exit_non_detached(zxr_internal_thread_t* thread) {
    // As soon as thread->state has changed to DONE, a caller of zxr_thread_join
    // might complete and deallocate the memory containing the thread descriptor.
    // Hence it's no longer safe to touch *thread or read anything out of it.
    // Therefore we must extract the thread handle before that transition
    // happens.
    zx_handle_t handle = take_handle(thread);

    // Wake the _zx_futex_wait in zxr_thread_join (below), and then die.
    // This has to be done with the special four-in-one vDSO call because
    // as soon as the state transitions to DONE, the joiner is free to unmap
    // our stack out from under us.  Note there is a benign race here still: if
    // the address is unmapped and our futex_wake fails, it's OK; if the memory
    // is reused for something else and our futex_wake tickles somebody
    // completely unrelated, well, that's why futex_wait can always have
    // spurious wakeups.
    _zx_futex_wake_handle_close_thread_exit(&thread->state, 1, DONE, handle);
    __builtin_trap();
}

static _Noreturn void thread_trampoline(uintptr_t ctx, uintptr_t arg) {
    zxr_internal_thread_t* thread = (zxr_internal_thread_t*)ctx;

    thread->entry((void*)arg);

    int old_state = begin_exit(thread);
    switch (old_state) {
    case JOINABLE:
        // Nobody's watching right now, but they might start watching as we
        // exit.  Just in case, behave as if we've been joined and wake the
        // futex on our way out.
    case JOINED:
        // Somebody loves us!  Or at least intends to inherit when we die.
        exit_non_detached(thread);
        break;
    }

    // Cannot be in DONE, EXITING, or DETACHED and reach here.  For DETACHED, it
    // is the responsibility of a higher layer to ensure this is not reached.
    __builtin_trap();
}
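
// Editorial illustration (not original to this file): a hypothetical entry
// function, showing the contract thread_trampoline gives to `entry` for a
// joinable thread.  The entry routine runs on the new stack and simply
// returns; the trampoline then performs the EXITING/DONE transition and wakes
// any joiner via _zx_futex_wake_handle_close_thread_exit.
__attribute__((unused))
static void example_entry(void* arg) {
    // Do the thread's work; `arg` is the value passed to zxr_thread_start.
    int* result = (int*)arg;
    *result = 42;
    // Returning hands control back to thread_trampoline, which handles exit.
}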

_Noreturn void zxr_thread_exit_unmap_if_detached(
    zxr_thread_t* thread, zx_handle_t vmar, uintptr_t addr, size_t len) {

    int old_state = begin_exit(to_internal(thread));
    switch (old_state) {
    case DETACHED: {
        zx_handle_t handle = take_handle(to_internal(thread));
        _zx_vmar_unmap_handle_close_thread_exit(vmar, addr, len, handle);
        break;
    }
    // See comments in thread_trampoline.
    case JOINABLE:
    case JOINED:
        exit_non_detached(to_internal(thread));
        break;
    }

    // Cannot be in DONE or EXITING and reach here.
    __builtin_trap();
}
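
// Editorial illustration (not original to this file): a hypothetical sketch of
// how a higher layer might terminate a thread that may be DETACHED.  Returning
// from the entry function would reach the trap in thread_trampoline for a
// DETACHED thread, so the thread exits through
// zxr_thread_exit_unmap_if_detached instead, which unmaps its own stack only
// if it really is detached.  The control-block layout and names are invented
// for illustration; a real threading library keeps equivalent bookkeeping.
typedef struct {
    zxr_thread_t zxr_thread;  // Must stay valid until the DONE transition.
    zx_handle_t stack_vmar;   // VMAR containing the stack mapping.
    uintptr_t stack_addr;     // Base of the mapping to unmap on detached exit.
    size_t stack_len;         // Length of that mapping.
} example_thread_block_t;

__attribute__((unused))
static void example_detachable_entry(void* arg) {
    example_thread_block_t* tb = (example_thread_block_t*)arg;

    // ... thread body ...

    // Never returns: if DETACHED, unmaps the stack, closes the thread handle,
    // and exits; otherwise it wakes any joiner exactly like exit_non_detached.
    zxr_thread_exit_unmap_if_detached(&tb->zxr_thread, tb->stack_vmar,
                                      tb->stack_addr, tb->stack_len);
}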

// Local implementation so libruntime does not depend on libc.
static size_t local_strlen(const char* s) {
    size_t len = 0;
    while (*s++ != '\0')
        ++len;
    return len;
}

static void initialize_thread(zxr_internal_thread_t* thread,
                              zx_handle_t handle, bool detached) {
    *thread = (zxr_internal_thread_t){ .handle = handle, };
    atomic_init(&thread->state, detached ? DETACHED : JOINABLE);
}

zx_status_t zxr_thread_create(zx_handle_t process, const char* name,
                              bool detached, zxr_thread_t* thread) {
    initialize_thread(to_internal(thread), ZX_HANDLE_INVALID, detached);
    if (name == NULL)
        name = "";
    size_t name_length = local_strlen(name) + 1;
    return _zx_thread_create(process, name, name_length, 0,
                             &to_internal(thread)->handle);
}

zx_status_t zxr_thread_start(zxr_thread_t* thread, uintptr_t stack_addr,
                             size_t stack_size, zxr_thread_entry_t entry,
                             void* arg) {
    to_internal(thread)->entry = entry;

    // Compute the initial stack pointer for the new thread.
    uintptr_t sp = compute_initial_stack_pointer(stack_addr, stack_size);

    // Kick off the new thread.
    zx_status_t status = _zx_thread_start(to_internal(thread)->handle,
                                          (uintptr_t)thread_trampoline, sp,
                                          (uintptr_t)thread, (uintptr_t)arg);

    if (status != ZX_OK)
        zxr_thread_destroy(thread);
    return status;
}

static void wait_for_done(zxr_internal_thread_t* thread, int32_t old_state) {
    do {
        switch (_zx_futex_wait(&thread->state, old_state, ZX_TIME_INFINITE)) {
            case ZX_ERR_BAD_STATE:   // Never blocked because it had changed.
            case ZX_OK:              // Woke up because it might have changed.
                old_state = atomic_load_explicit(&thread->state,
                                                 memory_order_acquire);
                break;
            default:
                __builtin_trap();
        }
        // Wait until we reach the DONE state, even if we observe the
        // intermediate EXITING state.
    } while (old_state == JOINED || old_state == EXITING);

    if (old_state != DONE)
        __builtin_trap();
}

zx_status_t zxr_thread_join(zxr_thread_t* external_thread) {
    zxr_internal_thread_t* thread = to_internal(external_thread);

    int old_state;
    // Try to claim the join slot on this thread.
    if (claim_thread(thread, JOINED, &old_state)) {
        wait_for_done(thread, JOINED);
    } else {
        switch (old_state) {
            case JOINED:
            case DETACHED:
                return ZX_ERR_INVALID_ARGS;
            case EXITING:
                // Since it is undefined to call zxr_thread_join on a thread
                // that has already been detached or joined, we assume the state
                // prior to EXITING was JOINABLE, and act as if we had
                // successfully transitioned to JOINED.
                wait_for_done(thread, EXITING);
                // Fall-through to DONE case.
                __FALLTHROUGH;
            case DONE:
                break;
            default:
                __builtin_trap();
        }
    }

    // The thread has already closed its own handle.
    return ZX_OK;
}

zx_status_t zxr_thread_detach(zxr_thread_t* thread) {
    int old_state;
    // Try to claim the join slot on this thread on behalf of the thread.
    if (!claim_thread(to_internal(thread), DETACHED, &old_state)) {
        switch (old_state) {
            case DETACHED:
            case JOINED:
                return ZX_ERR_INVALID_ARGS;
            case EXITING: {
                // Since it is undefined behavior to call zxr_thread_detach on a
                // thread that has already been detached or joined, we assume
                // the state prior to EXITING was JOINABLE.  However, since the
                // thread is already shutting down, it is too late to tell it to
                // clean itself up.  Since the thread is still running, we
                // cannot simply return ZX_ERR_BAD_STATE, which would suggest
                // the thread has already finished running.  Instead, we join
                // it, which returns soon because the thread is actively
                // shutting down, and then return ZX_ERR_BAD_STATE to tell the
                // caller that they must manually perform any post-join work.
                zx_status_t ret = zxr_thread_join(thread);
                if (unlikely(ret != ZX_OK)) {
                    if (unlikely(ret != ZX_ERR_INVALID_ARGS)) {
                        __builtin_trap();
                    }
                    return ret;
                }
                // Fall-through to DONE case.
                __FALLTHROUGH;
            }
            case DONE:
                return ZX_ERR_BAD_STATE;
            default:
                __builtin_trap();
        }
    }

    return ZX_OK;
}
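
// Editorial illustration (not original to this file): a hypothetical caller
// handling the EXITING race described above.  ZX_ERR_BAD_STATE means the
// thread was already shutting down (or finished) and has effectively been
// joined on the caller's behalf, so the caller must do its own post-join
// cleanup (for example, reclaiming the stack) rather than expecting the
// thread to clean up after itself.
__attribute__((unused))
static zx_status_t example_detach(zxr_thread_t* thread) {
    zx_status_t status = zxr_thread_detach(thread);
    if (status == ZX_ERR_BAD_STATE) {
        // The thread has effectively been joined; perform post-join cleanup
        // here, e.g. unmap its stack, just as an ordinary join caller would.
    }
    // ZX_ERR_INVALID_ARGS means the thread was already joined or detached,
    // which is a bug in the caller.
    return status;
}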

bool zxr_thread_detached(zxr_thread_t* thread) {
    int state = atomic_load_explicit(&to_internal(thread)->state,
                                     memory_order_acquire);
    return state == DETACHED;
}

zx_handle_t zxr_thread_get_handle(zxr_thread_t* thread) {
    return to_internal(thread)->handle;
}

zx_status_t zxr_thread_adopt(zx_handle_t handle, zxr_thread_t* thread) {
    initialize_thread(to_internal(thread), handle, false);
    return handle == ZX_HANDLE_INVALID ? ZX_ERR_BAD_HANDLE : ZX_OK;
}