/*
 * Copyright (c) 2005 David Xu <davidxu@freebsd.org>
 * Copyright (C) 2003 Daniel M. Eischen <deischen@freebsd.org>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice unmodified, this list of conditions, and the following
 *    disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * $FreeBSD$
 */

#include <sys/types.h>
#include <sys/queue.h>

#include <stdlib.h>
#include <string.h>
#include <pthread.h>

#include "thr_private.h"
#include "libc_private.h"

/*#define DEBUG_THREAD_LIST */
#ifdef DEBUG_THREAD_LIST
#define DBG_MSG		stdout_debug
#else
#define DBG_MSG(x...)
#endif

/* Hard cap on the number of thread structures that may exist at once. */
#define MAX_THREADS		100000

/*
 * Define a high water mark for the maximum number of threads that
 * will be cached.  Once this level is reached, any extra threads
 * will be free()'d.
 */
#define MAX_CACHED_THREADS	100

/*
 * We've got to keep track of everything that is allocated, not only
 * to have a speedy free list, but also so they can be deallocated
 * after a fork().
 */
static TAILQ_HEAD(, pthread) free_threadq;	/* cache of freed thread structs */
static struct umutex free_thread_lock = DEFAULT_UMUTEX;
static struct umutex tcb_lock = DEFAULT_UMUTEX;
static int free_thread_count = 0;		/* current length of free_threadq */
static int inited = 0;
static int total_threads;			/* live + cached, soft-capped at MAX_THREADS */

/*
 * Hash table of all linked threads, keyed by the pthread pointer
 * itself; used by _thr_hash_find()/_thr_find_thread() to validate
 * a pthread_t handed in by the application.
 */
LIST_HEAD(thread_hash_head, pthread);
#define HASH_QUEUES	128
static struct thread_hash_head thr_hashtable[HASH_QUEUES];
/* Discard the low 8 (alignment) bits of the pointer before hashing. */
#define THREAD_HASH(thrd)	(((unsigned long)thrd >> 8) % HASH_QUEUES)

static void thr_destroy(struct pthread *curthread, struct pthread *thread);

/*
 * Initialize all thread-list state: the global thread list, the
 * free-thread cache, their locks, and (on reinitialization, e.g.
 * after fork()) the thread hash table.
 */
void
_thr_list_init(void)
{
	int i;

	_gc_count = 0;
	total_threads = 1;	/* account for the calling thread */
	_thr_urwlock_init(&_thr_list_lock);
	TAILQ_INIT(&_thread_list);
	TAILQ_INIT(&free_threadq);
	_thr_umutex_init(&free_thread_lock);
	_thr_umutex_init(&tcb_lock);
	/* On the very first call the static hash table is already zeroed. */
	if (inited) {
		for (i = 0; i < HASH_QUEUES; ++i)
			LIST_INIT(&thr_hashtable[i]);
	}
	inited = 1;
}

/*
 * Reclaim threads on the GC list.  A thread is only collected once
 * its tid has been set to TID_TERMINATED (i.e. the kernel thread has
 * exited); collected threads have their stack freed and are then
 * released via _thr_free() outside the list lock.
 */
void
_thr_gc(struct pthread *curthread)
{
	struct pthread *td, *td_next;
	TAILQ_HEAD(, pthread) worklist;

	TAILQ_INIT(&worklist);
	THREAD_LIST_WRLOCK(curthread);

	/* Check the threads waiting for GC. */
	TAILQ_FOREACH_SAFE(td, &_thread_gc_list, gcle, td_next) {
		if (td->tid != TID_TERMINATED) {
			/* make sure we are not still in userland */
			continue;
		}
		_thr_stack_free(&td->attr);
		/* Move to a private worklist so freeing can drop the lock. */
		THR_GCLIST_REMOVE(td);
		TAILQ_INSERT_HEAD(&worklist, td, gcle);
	}
	THREAD_LIST_UNLOCK(curthread);

	while ((td = TAILQ_FIRST(&worklist)) != NULL) {
		TAILQ_REMOVE(&worklist, td, gcle);
		/*
		 * XXX we don't free initial thread, because there might
		 * have some code referencing initial thread.
		 */
		if (td == _thr_initial) {
			DBG_MSG("Initial thread won't be freed\n");
			continue;
		}

		_thr_free(curthread, td);
	}
}

/*
 * Allocate a thread structure, preferring the free-thread cache.
 * curthread may be NULL during libthr bootstrap, in which case the
 * cache and locks are bypassed and the initial TLS block is used.
 * Returns NULL on resource exhaustion (MAX_THREADS reached, or an
 * allocation failed).
 */
struct pthread *
_thr_alloc(struct pthread *curthread)
{
	struct pthread *thread = NULL;
	struct tcb *tcb;

	if (curthread != NULL) {
		if (GC_NEEDED())
			_thr_gc(curthread);
		if (free_thread_count > 0) {
			THR_LOCK_ACQUIRE(curthread, &free_thread_lock);
			if ((thread = TAILQ_FIRST(&free_threadq)) != NULL) {
				TAILQ_REMOVE(&free_threadq, thread, tle);
				free_thread_count--;
			}
			THR_LOCK_RELEASE(curthread, &free_thread_lock);
		}
	}
	if (thread == NULL) {
		/*
		 * Soft limit: the check and the increment are not atomic
		 * together, so the cap may be overshot slightly under
		 * contention.
		 */
		if (total_threads > MAX_THREADS)
			return (NULL);
		atomic_fetchadd_int(&total_threads, 1);
		thread = calloc(1, sizeof(struct pthread));
		if (thread == NULL) {
			atomic_fetchadd_int(&total_threads, -1);
			return (NULL);
		}
		if ((thread->sleepqueue = _sleepq_alloc()) == NULL ||
		    (thread->wake_addr = _thr_alloc_wake_addr()) == NULL) {
			thr_destroy(curthread, thread);
			atomic_fetchadd_int(&total_threads, -1);
			return (NULL);
		}
	} else {
		/* Cached struct: wipe only the designated resettable range. */
		bzero(&thread->_pthread_startzero,
		    __rangeof(struct pthread, _pthread_startzero, _pthread_endzero));
	}
	if (curthread != NULL) {
		THR_LOCK_ACQUIRE(curthread, &tcb_lock);
		tcb = _tcb_ctor(thread, 0 /* not initial tls */);
		THR_LOCK_RELEASE(curthread, &tcb_lock);
	} else {
		tcb = _tcb_ctor(thread, 1 /* initial tls */);
	}
	if (tcb != NULL) {
		thread->tcb = tcb;
	} else {
		thr_destroy(curthread, thread);
		atomic_fetchadd_int(&total_threads, -1);
		thread = NULL;
	}
	return (thread);
}

/*
 * Release a thread structure: the tcb is always destroyed, then the
 * struct is either cached on the free list or destroyed outright if
 * the cache is full (or we are bootstrapping and curthread is NULL).
 */
void
_thr_free(struct pthread *curthread, struct pthread *thread)
{
	DBG_MSG("Freeing thread %p\n", thread);

	/*
	 * Always free tcb, as we only know it is part of RTLD TLS
	 * block, but don't know its detail and can not assume how
	 * it works, so better to avoid caching it here.
	 */
	if (curthread != NULL) {
		THR_LOCK_ACQUIRE(curthread, &tcb_lock);
		_tcb_dtor(thread->tcb);
		THR_LOCK_RELEASE(curthread, &tcb_lock);
	} else {
		_tcb_dtor(thread->tcb);
	}
	thread->tcb = NULL;
	if ((curthread == NULL) || (free_thread_count >= MAX_CACHED_THREADS)) {
		thr_destroy(curthread, thread);
		atomic_fetchadd_int(&total_threads, -1);
	} else {
		/*
		 * Add the thread to the free thread list, this also avoids
		 * pthread id is reused too quickly, may help some buggy apps.
		 */
		THR_LOCK_ACQUIRE(curthread, &free_thread_lock);
		TAILQ_INSERT_TAIL(&free_threadq, thread, tle);
		free_thread_count++;
		THR_LOCK_RELEASE(curthread, &free_thread_lock);
	}
}

/*
 * Finally destroy a thread structure: release its sleep queue and
 * wake address (either may be NULL if allocation failed part-way
 * in _thr_alloc()), then free the struct itself.
 */
static void
thr_destroy(struct pthread *curthread __unused, struct pthread *thread)
{
	if (thread->sleepqueue != NULL)
		_sleepq_free(thread->sleepqueue);
	if (thread->wake_addr != NULL)
		_thr_release_wake_addr(thread->wake_addr);
	free(thread);
}

/*
 * Add the thread to the list of all threads and increment
 * number of active threads.
 */
void
_thr_link(struct pthread *curthread, struct pthread *thread)
{
	THREAD_LIST_WRLOCK(curthread);
	THR_LIST_ADD(thread);
	THREAD_LIST_UNLOCK(curthread);
	atomic_add_int(&_thread_active_threads, 1);
}

/*
 * Remove an active thread.
 */
void
_thr_unlink(struct pthread *curthread, struct pthread *thread)
{
	THREAD_LIST_WRLOCK(curthread);
	THR_LIST_REMOVE(thread);
	THREAD_LIST_UNLOCK(curthread);
	atomic_add_int(&_thread_active_threads, -1);
}

/* Insert the thread into its hash bucket; caller holds the list lock. */
void
_thr_hash_add(struct pthread *thread)
{
	struct thread_hash_head *head;

	head = &thr_hashtable[THREAD_HASH(thread)];
	LIST_INSERT_HEAD(head, thread, hle);
}

/* Remove the thread from its hash bucket; caller holds the list lock. */
void
_thr_hash_remove(struct pthread *thread)
{
	LIST_REMOVE(thread, hle);
}

/*
 * Validate a pthread pointer by looking it up in the hash table.
 * Returns the pointer itself if it names a linked thread, else NULL.
 */
struct pthread *
_thr_hash_find(struct pthread *thread)
{
	struct pthread *td;
	struct thread_hash_head *head;

	head = &thr_hashtable[THREAD_HASH(thread)];
	LIST_FOREACH(td, head, hle) {
		if (td == thread)
			return (thread);
	}
	return (NULL);
}

/*
 * Find a thread in the linked list of active threads and add a reference
 * to it.  Threads with positive reference counts will not be deallocated
 * until all references are released.
 */
int
_thr_ref_add(struct pthread *curthread, struct pthread *thread,
    int include_dead)
{
	int ret;

	if (thread == NULL)
		/* Invalid thread: */
		return (EINVAL);

	/* On success _thr_find_thread() returns with the thread lock held. */
	if ((ret = _thr_find_thread(curthread, thread, include_dead)) == 0) {
		thread->refcount++;
		THR_CRITICAL_ENTER(curthread);
		THR_THREAD_UNLOCK(curthread, thread);
	}

	/* Return zero if the thread exists: */
	return (ret);
}

/*
 * Drop a reference taken by _thr_ref_add() and let the thread be
 * garbage-collected if it is now eligible.
 */
void
_thr_ref_delete(struct pthread *curthread, struct pthread *thread)
{
	THR_THREAD_LOCK(curthread, thread);
	thread->refcount--;
	_thr_try_gc(curthread, thread);
	THR_CRITICAL_LEAVE(curthread);
}

/* entered with thread lock held, exit with thread lock released */
void
_thr_try_gc(struct pthread *curthread, struct pthread *thread)
{
	if (THR_SHOULD_GC(thread)) {
		/*
		 * Lock order requires the list lock before the thread
		 * lock, so pin the thread with a temporary reference,
		 * drop its lock, take the list lock, and re-check
		 * eligibility before moving it to the GC list.
		 */
		THR_REF_ADD(curthread, thread);
		THR_THREAD_UNLOCK(curthread, thread);
		THREAD_LIST_WRLOCK(curthread);
		THR_THREAD_LOCK(curthread, thread);
		THR_REF_DEL(curthread, thread);
		if (THR_SHOULD_GC(thread)) {
			THR_LIST_REMOVE(thread);
			THR_GCLIST_ADD(thread);
		}
		THR_THREAD_UNLOCK(curthread, thread);
		THREAD_LIST_UNLOCK(curthread);
	} else {
		THR_THREAD_UNLOCK(curthread, thread);
	}
}

/* return with thread lock held if thread is found */
int
_thr_find_thread(struct pthread *curthread, struct pthread *thread,
    int include_dead)
{
	struct pthread *pthread;
	int ret;

	if (thread == NULL)
		return (EINVAL);

	ret = 0;
	THREAD_LIST_RDLOCK(curthread);
	pthread = _thr_hash_find(thread);
	if (pthread) {
		THR_THREAD_LOCK(curthread, pthread);
		/* A dead thread only counts when include_dead is set. */
		if (include_dead == 0 && pthread->state == PS_DEAD) {
			THR_THREAD_UNLOCK(curthread, pthread);
			ret = ESRCH;
		}
	} else {
		ret = ESRCH;
	}
	THREAD_LIST_UNLOCK(curthread);
	return (ret);
}