#define _GNU_SOURCE
#include "pthread_impl.h"
#include "stdio_impl.h"
#include "libc.h"
#include <sys/mman.h>
#include <string.h>
#include <stddef.h>

void *__mmap(void *, size_t, int, int, int, off_t);
int __munmap(void *, size_t);
int __mprotect(void *, size_t, int);

static void dummy_0()
{
}
weak_alias(dummy_0, __acquire_ptc);
weak_alias(dummy_0, __release_ptc);
weak_alias(dummy_0, __pthread_tsd_run_dtors);
weak_alias(dummy_0, __do_orphaned_stdio_locks);
weak_alias(dummy_0, __dl_thread_cleanup);

_Noreturn void __pthread_exit(void *result)
{
	pthread_t self = __pthread_self();
	sigset_t set;

	self->canceldisable = 1;
	self->cancelasync = 0;
	self->result = result;

	while (self->cancelbuf) {
		void (*f)(void *) = self->cancelbuf->__f;
		void *x = self->cancelbuf->__x;
		self->cancelbuf = self->cancelbuf->__next;
		f(x);
	}

	__pthread_tsd_run_dtors();

	__lock(self->exitlock);

	/* Mark this thread dead before decrementing count */
	__lock(self->killlock);
	self->dead = 1;

	/* Block all signals before decrementing the live thread count.
	 * This is important to ensure that dynamically allocated TLS
	 * is not under-allocated/over-committed, and possibly for other
	 * reasons as well. */
	__block_all_sigs(&set);

	/* Wait to unlock the kill lock, which governs functions like
	 * pthread_kill which target a thread id, until signals have
	 * been blocked. This precludes observation of the thread id
	 * as a live thread (with application code running in it) after
	 * the thread was reported dead by ESRCH being returned. */
	__unlock(self->killlock);

	/* It's impossible to determine whether this is "the last thread"
	 * until performing the atomic decrement, since multiple threads
	 * could exit at the same time. For the last thread, revert the
	 * decrement and unblock signals to give the atexit handlers and
	 * stdio cleanup code a consistent state. */
	if (a_fetch_add(&libc.threads_minus_1, -1)==0) {
		libc.threads_minus_1 = 0;
		__restore_sigs(&set);
		exit(0);
	}

	/* Process robust list in userspace to handle non-pshared mutexes
	 * and the detached thread case where the robust list head will
	 * be invalid when the kernel would process it. */
	__vm_lock();
	volatile void *volatile *rp;
	while ((rp=self->robust_list.head) && rp != &self->robust_list.head) {
		pthread_mutex_t *m = (void *)((char *)rp
			- offsetof(pthread_mutex_t, _m_next));
		int waiters = m->_m_waiters;
		int priv = (m->_m_type & 128) ^ 128;
		self->robust_list.pending = rp;
		self->robust_list.head = *rp;
		int cont = a_swap(&m->_m_lock, 0x40000000);
		self->robust_list.pending = 0;
		if (cont < 0 || waiters)
			__wake(&m->_m_lock, 1, priv);
	}
	__vm_unlock();

	__do_orphaned_stdio_locks();
	__dl_thread_cleanup();

	if (self->detached && self->map_base) {
		/* Detached threads must avoid the kernel clear_child_tid
		 * feature, since the virtual address will have been
		 * unmapped and possibly already reused by a new mapping
		 * at the time the kernel would perform the write. In
		 * the case of threads that started out detached, the
		 * initial clone flags are correct, but if the thread was
		 * detached later (== 2), we need to clear it here. */
		if (self->detached == 2) __syscall(SYS_set_tid_address, 0);

		/* Robust list will no longer be valid, and was already
		 * processed above, so unregister it with the kernel. */
		if (self->robust_list.off)
			__syscall(SYS_set_robust_list, 0, 3*sizeof(long));

		/* Since __unmapself bypasses the normal munmap code path,
		 * explicitly wait for vmlock holders first. */
		__vm_wait();

		/* The following call unmaps the thread's stack mapping
		 * and then exits without touching the stack. */
		__unmapself(self->map_base, self->map_size);
	}

	for (;;) __syscall(SYS_exit, 0);
}
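/* Descriptive note (added for clarity): the two functions below maintain
 * the calling thread's LIFO list of cancellation cleanup handlers, the
 * same cancelbuf list that __pthread_exit above drains before running
 * TSD destructors. Illustrative caller-side usage, not part of this
 * file -- the pthread_cleanup_push/pop macros route through them:
 *
 *	pthread_cleanup_push(free, buf);
 *	... code that may be cancelled ...
 *	pthread_cleanup_pop(1);	// pop and run the handler
 */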
void __do_cleanup_push(struct __ptcb *cb)
{
	struct pthread *self = __pthread_self();
	cb->__next = self->cancelbuf;
	self->cancelbuf = cb;
}

void __do_cleanup_pop(struct __ptcb *cb)
{
	__pthread_self()->cancelbuf = cb->__next;
}

static int start(void *p)
{
	pthread_t self = p;
	if (self->startlock[0]) {
		__wait(self->startlock, 0, 1, 1);
		if (self->startlock[0]) {
			self->detached = 2;
			pthread_exit(0);
		}
		__restore_sigs(self->sigmask);
	}
	if (self->unblock_cancel)
		__syscall(SYS_rt_sigprocmask, SIG_UNBLOCK,
			SIGPT_SET, 0, _NSIG/8);
	__pthread_exit(self->start(self->start_arg));
	return 0;
}

static int start_c11(void *p)
{
	pthread_t self = p;
	int (*start)(void*) = (int(*)(void*)) self->start;
	__pthread_exit((void *)(uintptr_t)start(self->start_arg));
	return 0;
}

#define ROUND(x) (((x)+PAGE_SIZE-1)&-PAGE_SIZE)

/* pthread_key_create.c overrides this */
static volatile size_t dummy = 0;
weak_alias(dummy, __pthread_tsd_size);
static void *dummy_tsd[1] = { 0 };
weak_alias(dummy_tsd, __pthread_tsd_main);

volatile int __block_new_threads = 0;
size_t __default_stacksize = DEFAULT_STACK_SIZE;
size_t __default_guardsize = DEFAULT_GUARD_SIZE;

static FILE *volatile dummy_file = 0;
weak_alias(dummy_file, __stdin_used);
weak_alias(dummy_file, __stdout_used);
weak_alias(dummy_file, __stderr_used);

static void init_file_lock(FILE *f)
{
	if (f && f->lock<0) f->lock = 0;
}

void *__copy_tls(unsigned char *);
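/* Memory layout of a fully mmapped thread, low addresses first (sketch
 * added for clarity; inferred from the pointer arithmetic below, not
 * from the original file):
 *
 *	map: [ guard | stack (grows down) | TLS/pthread | TSD ]
 *
 * stack_limit = map + guard, stack = tsd - libc.tls_size, and tsd is
 * the top __pthread_tsd_size bytes of the mapping. __copy_tls()
 * installs the new thread descriptor inside the TLS block. */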
int __pthread_create(pthread_t *restrict res, const pthread_attr_t *restrict attrp, void *(*entry)(void *), void *restrict arg)
{
	int ret, c11 = (attrp == __ATTRP_C11_THREAD);
	size_t size, guard;
	struct pthread *self, *new;
	unsigned char *map = 0, *stack = 0, *tsd = 0, *stack_limit;
	unsigned flags = CLONE_VM | CLONE_FS | CLONE_FILES | CLONE_SIGHAND
		| CLONE_THREAD | CLONE_SYSVSEM | CLONE_SETTLS
		| CLONE_PARENT_SETTID | CLONE_CHILD_CLEARTID | CLONE_DETACHED;
	int do_sched = 0;
	pthread_attr_t attr = { 0 };

	if (!libc.can_do_threads) return ENOSYS;
	self = __pthread_self();
	if (!libc.threaded) {
		for (FILE *f=*__ofl_lock(); f; f=f->next)
			init_file_lock(f);
		__ofl_unlock();
		init_file_lock(__stdin_used);
		init_file_lock(__stdout_used);
		init_file_lock(__stderr_used);
		__syscall(SYS_rt_sigprocmask, SIG_UNBLOCK, SIGPT_SET, 0, _NSIG/8);
		self->tsd = (void **)__pthread_tsd_main;
		libc.threaded = 1;
	}
	if (attrp && !c11) attr = *attrp;

	__acquire_ptc();
	if (!attrp || c11) {
		attr._a_stacksize = __default_stacksize;
		attr._a_guardsize = __default_guardsize;
	}

	if (__block_new_threads) __wait(&__block_new_threads, 0, 1, 1);

	if (attr._a_stackaddr) {
		size_t need = libc.tls_size + __pthread_tsd_size;
		size = attr._a_stacksize;
		stack = (void *)(attr._a_stackaddr & -16);
		stack_limit = (void *)(attr._a_stackaddr - size);
		/* Use application-provided stack for TLS only when
		 * it does not take more than ~12% or 2k of the
		 * application's stack space. */
		if (need < size/8 && need < 2048) {
			tsd = stack - __pthread_tsd_size;
			stack = tsd - libc.tls_size;
			memset(stack, 0, need);
		} else {
			size = ROUND(need);
			guard = 0;
		}
	} else {
		guard = ROUND(attr._a_guardsize);
		size = guard + ROUND(attr._a_stacksize
			+ libc.tls_size + __pthread_tsd_size);
	}

	if (!tsd) {
		if (guard) {
			map = __mmap(0, size, PROT_NONE, MAP_PRIVATE|MAP_ANON, -1, 0);
			if (map == MAP_FAILED) goto fail;
			if (__mprotect(map+guard, size-guard, PROT_READ|PROT_WRITE)
			    && errno != ENOSYS) {
				__munmap(map, size);
				goto fail;
			}
		} else {
			map = __mmap(0, size, PROT_READ|PROT_WRITE, MAP_PRIVATE|MAP_ANON, -1, 0);
			if (map == MAP_FAILED) goto fail;
		}
		tsd = map + size - __pthread_tsd_size;
		if (!stack) {
			stack = tsd - libc.tls_size;
			stack_limit = map + guard;
		}
	}

	new = __copy_tls(tsd - libc.tls_size);
	new->map_base = map;
	new->map_size = size;
	new->stack = stack;
	new->stack_size = stack - stack_limit;
	new->start = entry;
	new->start_arg = arg;
	new->self = new;
	new->tsd = (void *)tsd;
	new->locale = &libc.global_locale;
	if (attr._a_detach) {
		new->detached = 1;
		flags -= CLONE_CHILD_CLEARTID;
	}
	if (attr._a_sched) {
		do_sched = new->startlock[0] = 1;
		__block_app_sigs(new->sigmask);
	}
	new->robust_list.head = &new->robust_list.head;
	new->unblock_cancel = self->cancel;
	new->CANARY = self->CANARY;

	a_inc(&libc.threads_minus_1);
	ret = __clone((c11 ? start_c11 : start), stack, flags, new, &new->tid, TP_ADJ(new), &new->tid);

	__release_ptc();

	if (do_sched) {
		__restore_sigs(new->sigmask);
	}

	if (ret < 0) {
		a_dec(&libc.threads_minus_1);
		if (map) __munmap(map, size);
		return EAGAIN;
	}

	if (do_sched) {
		ret = __syscall(SYS_sched_setscheduler, new->tid,
			attr._a_policy, &attr._a_prio);
		a_store(new->startlock, ret<0 ? 2 : 0);
		__wake(new->startlock, 1, 1);
		if (ret < 0) return -ret;
	}

	*res = new;
	return 0;
fail:
	__release_ptc();
	return EAGAIN;
}

weak_alias(__pthread_exit, pthread_exit);
weak_alias(__pthread_create, pthread_create);
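/* Example usage (illustrative sketch, not part of this file): creating
 * a detached thread with a custom stack size exercises the attribute
 * and mmap paths above. "worker" stands in for any void *(*)(void *)
 * start routine:
 *
 *	pthread_t t;
 *	pthread_attr_t a;
 *	pthread_attr_init(&a);
 *	pthread_attr_setstacksize(&a, 1<<20);
 *	pthread_attr_setdetachstate(&a, PTHREAD_CREATE_DETACHED);
 *	pthread_create(&t, &a, worker, 0);
 *	pthread_attr_destroy(&a);
 */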