/* kern_thr.c revision 155501 */
/*-
 * Copyright (c) 2003, Jeffrey Roberson <jeff@freebsd.org>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice unmodified, this list of conditions, and the following
 *    disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD: head/sys/kern/kern_thr.c 155501 2006-02-10 03:34:29Z davidxu $");

#include <sys/param.h>
#include <sys/kernel.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/proc.h>
#include <sys/resourcevar.h>
#include <sys/sched.h>
#include <sys/sysctl.h>
#include <sys/smp.h>
#include <sys/sysent.h>
#include <sys/systm.h>
#include <sys/sysproto.h>
#include <sys/signalvar.h>
#include <sys/ucontext.h>
#include <sys/thr.h>
#include <sys/umtx.h>
#include <sys/limits.h>

#include <machine/frame.h>

/* Per-process limits, defined elsewhere in the kernel. */
extern int max_threads_per_proc;
extern int max_groups_per_proc;

SYSCTL_DECL(_kern_threads);

/*
 * Scope-scheduling override: 0 honors the flag passed by userland,
 * 1 forces process scope, 2 forces system scope (see create_thread()).
 */
static int thr_scope = 0;
SYSCTL_INT(_kern_threads, OID_AUTO, thr_scope, CTLFLAG_RW,
    &thr_scope, 0, "sys or proc scope scheduling");

/* Non-zero overrides the default concurrency (2 * mp_ncpus). */
static int thr_concurrency = 0;
SYSCTL_INT(_kern_threads, OID_AUTO, thr_concurrency, CTLFLAG_RW,
    &thr_concurrency, 0, "a concurrency value if not default");

static int create_thread(struct thread *td, mcontext_t *ctx,
			 void (*start_func)(void *), void *arg,
			 char *stack_base, size_t stack_size,
			 char *tls_base,
			 long *child_tid, long *parent_tid,
			 int flags);

/*
 * System call interface.
71 */ 72int 73thr_create(struct thread *td, struct thr_create_args *uap) 74 /* ucontext_t *ctx, long *id, int flags */ 75{ 76 ucontext_t ctx; 77 int error; 78 79 if ((error = copyin(uap->ctx, &ctx, sizeof(ctx)))) 80 return (error); 81 82 error = create_thread(td, &ctx.uc_mcontext, NULL, NULL, 83 NULL, 0, NULL, uap->id, NULL, uap->flags); 84 return (error); 85} 86 87int 88thr_new(struct thread *td, struct thr_new_args *uap) 89 /* struct thr_param * */ 90{ 91 struct thr_param param; 92 int error; 93 94 if (uap->param_size < sizeof(param)) 95 return (EINVAL); 96 if ((error = copyin(uap->param, ¶m, sizeof(param)))) 97 return (error); 98 error = create_thread(td, NULL, param.start_func, param.arg, 99 param.stack_base, param.stack_size, param.tls_base, 100 param.child_tid, param.parent_tid, param.flags); 101 return (error); 102} 103 104static int 105create_thread(struct thread *td, mcontext_t *ctx, 106 void (*start_func)(void *), void *arg, 107 char *stack_base, size_t stack_size, 108 char *tls_base, 109 long *child_tid, long *parent_tid, 110 int flags) 111{ 112 stack_t stack; 113 struct thread *newtd; 114 struct ksegrp *kg, *newkg; 115 struct proc *p; 116 long id; 117 int error, scope_sys, linkkg; 118 119 error = 0; 120 p = td->td_proc; 121 kg = td->td_ksegrp; 122 123 /* Have race condition but it is cheap. */ 124 if ((p->p_numksegrps >= max_groups_per_proc) || 125 (p->p_numthreads >= max_threads_per_proc)) { 126 return (EPROCLIM); 127 } 128 129 /* Check PTHREAD_SCOPE_SYSTEM */ 130 scope_sys = (flags & THR_SYSTEM_SCOPE) != 0; 131 132 /* sysctl overrides user's flag */ 133 if (thr_scope == 1) 134 scope_sys = 0; 135 else if (thr_scope == 2) 136 scope_sys = 1; 137 138 /* Initialize our td and new ksegrp.. */ 139 newtd = thread_alloc(); 140 141 /* 142 * Try the copyout as soon as we allocate the td so we don't 143 * have to tear things down in a failure case below. 
144 * Here we copy out tid to two places, one for child and one 145 * for parent, because pthread can create a detached thread, 146 * if parent wants to safely access child tid, it has to provide 147 * its storage, because child thread may exit quickly and 148 * memory is freed before parent thread can access it. 149 */ 150 id = newtd->td_tid; 151 if ((child_tid != NULL && 152 (error = copyout(&id, child_tid, sizeof(long)))) || 153 (parent_tid != NULL && 154 (error = copyout(&id, parent_tid, sizeof(long))))) { 155 thread_free(newtd); 156 return (error); 157 } 158 bzero(&newtd->td_startzero, 159 __rangeof(struct thread, td_startzero, td_endzero)); 160 bcopy(&td->td_startcopy, &newtd->td_startcopy, 161 __rangeof(struct thread, td_startcopy, td_endcopy)); 162 newtd->td_proc = td->td_proc; 163 newtd->td_ucred = crhold(td->td_ucred); 164 165 cpu_set_upcall(newtd, td); 166 167 if (ctx != NULL) { /* old way to set user context */ 168 error = set_mcontext(newtd, ctx); 169 if (error != 0) { 170 thread_free(newtd); 171 crfree(td->td_ucred); 172 return (error); 173 } 174 } else { 175 /* Set up our machine context. */ 176 stack.ss_sp = stack_base; 177 stack.ss_size = stack_size; 178 /* Set upcall address to user thread entry function. */ 179 cpu_set_upcall_kse(newtd, start_func, arg, &stack); 180 /* Setup user TLS address and TLS pointer register. */ 181 error = cpu_set_user_tls(newtd, tls_base); 182 if (error != 0) { 183 thread_free(newtd); 184 crfree(td->td_ucred); 185 return (error); 186 } 187 } 188 189 if ((td->td_proc->p_flag & P_HADTHREADS) == 0) { 190 /* Treat initial thread as it has PTHREAD_SCOPE_PROCESS. */ 191 p->p_procscopegrp = kg; 192 mtx_lock_spin(&sched_lock); 193 sched_set_concurrency(kg, 194 thr_concurrency ? 
thr_concurrency : (2*mp_ncpus)); 195 mtx_unlock_spin(&sched_lock); 196 } 197 198 linkkg = 0; 199 if (scope_sys) { 200 linkkg = 1; 201 newkg = ksegrp_alloc(); 202 bzero(&newkg->kg_startzero, 203 __rangeof(struct ksegrp, kg_startzero, kg_endzero)); 204 bcopy(&kg->kg_startcopy, &newkg->kg_startcopy, 205 __rangeof(struct ksegrp, kg_startcopy, kg_endcopy)); 206 sched_init_concurrency(newkg); 207 PROC_LOCK(td->td_proc); 208 } else { 209 /* 210 * Try to create a KSE group which will be shared 211 * by all PTHREAD_SCOPE_PROCESS threads. 212 */ 213retry: 214 PROC_LOCK(td->td_proc); 215 if ((newkg = p->p_procscopegrp) == NULL) { 216 PROC_UNLOCK(p); 217 newkg = ksegrp_alloc(); 218 bzero(&newkg->kg_startzero, 219 __rangeof(struct ksegrp, kg_startzero, kg_endzero)); 220 bcopy(&kg->kg_startcopy, &newkg->kg_startcopy, 221 __rangeof(struct ksegrp, kg_startcopy, kg_endcopy)); 222 PROC_LOCK(p); 223 if (p->p_procscopegrp == NULL) { 224 p->p_procscopegrp = newkg; 225 sched_init_concurrency(newkg); 226 sched_set_concurrency(newkg, 227 thr_concurrency ? thr_concurrency : (2*mp_ncpus)); 228 linkkg = 1; 229 } else { 230 PROC_UNLOCK(p); 231 ksegrp_free(newkg); 232 goto retry; 233 } 234 } 235 } 236 237 td->td_proc->p_flag |= P_HADTHREADS; 238 newtd->td_sigmask = td->td_sigmask; 239 mtx_lock_spin(&sched_lock); 240 if (linkkg) 241 ksegrp_link(newkg, p); 242 thread_link(newtd, newkg); 243 PROC_UNLOCK(p); 244 245 /* let the scheduler know about these things. 
*/ 246 if (linkkg) 247 sched_fork_ksegrp(td, newkg); 248 sched_fork_thread(td, newtd); 249 TD_SET_CAN_RUN(newtd); 250 /* if ((flags & THR_SUSPENDED) == 0) */ 251 setrunqueue(newtd, SRQ_BORING); 252 mtx_unlock_spin(&sched_lock); 253 254 return (error); 255} 256 257int 258thr_self(struct thread *td, struct thr_self_args *uap) 259 /* long *id */ 260{ 261 long id; 262 int error; 263 264 id = td->td_tid; 265 if ((error = copyout(&id, uap->id, sizeof(long)))) 266 return (error); 267 268 return (0); 269} 270 271int 272thr_exit(struct thread *td, struct thr_exit_args *uap) 273 /* long *state */ 274{ 275 struct proc *p; 276 277 p = td->td_proc; 278 279 /* Signal userland that it can free the stack. */ 280 if ((void *)uap->state != NULL) { 281 suword((void *)uap->state, 1); 282 kern_umtx_wake(td, uap->state, INT_MAX); 283 } 284 285 PROC_LOCK(p); 286 sigqueue_flush(&td->td_sigqueue); 287 mtx_lock_spin(&sched_lock); 288 289 /* 290 * Shutting down last thread in the proc. This will actually 291 * call exit() in the trampoline when it returns. 
292 */ 293 if (p->p_numthreads != 1) { 294 thread_stopped(p); 295 thread_exit(); 296 /* NOTREACHED */ 297 } 298 mtx_unlock_spin(&sched_lock); 299 PROC_UNLOCK(p); 300 return (0); 301} 302 303int 304thr_kill(struct thread *td, struct thr_kill_args *uap) 305 /* long id, int sig */ 306{ 307 struct thread *ttd; 308 struct proc *p; 309 int error; 310 311 p = td->td_proc; 312 error = 0; 313 PROC_LOCK(p); 314 if (uap->id == -1) { 315 if (uap->sig != 0 && !_SIG_VALID(uap->sig)) { 316 error = EINVAL; 317 } else { 318 error = ESRCH; 319 FOREACH_THREAD_IN_PROC(p, ttd) { 320 if (ttd != td) { 321 error = 0; 322 if (uap->sig == 0) 323 break; 324 tdsignal(p, ttd, uap->sig, NULL); 325 } 326 } 327 } 328 } else { 329 if (uap->id != td->td_tid) 330 ttd = thread_find(p, uap->id); 331 else 332 ttd = td; 333 if (ttd == NULL) 334 error = ESRCH; 335 else if (uap->sig == 0) 336 ; 337 else if (!_SIG_VALID(uap->sig)) 338 error = EINVAL; 339 else 340 tdsignal(p, ttd, uap->sig, NULL); 341 } 342 PROC_UNLOCK(p); 343 return (error); 344} 345 346int 347thr_suspend(struct thread *td, struct thr_suspend_args *uap) 348 /* const struct timespec *timeout */ 349{ 350 struct timespec ts; 351 struct timeval tv; 352 int error; 353 int hz; 354 355 hz = 0; 356 error = 0; 357 if (uap->timeout != NULL) { 358 error = copyin((const void *)uap->timeout, (void *)&ts, 359 sizeof(struct timespec)); 360 if (error != 0) 361 return (error); 362 if (ts.tv_nsec < 0 || ts.tv_nsec > 1000000000) 363 return (EINVAL); 364 if (ts.tv_sec == 0 && ts.tv_nsec == 0) 365 return (ETIMEDOUT); 366 TIMESPEC_TO_TIMEVAL(&tv, &ts); 367 hz = tvtohz(&tv); 368 } 369 PROC_LOCK(td->td_proc); 370 if ((td->td_flags & TDF_THRWAKEUP) == 0) 371 error = msleep((void *)td, &td->td_proc->p_mtx, 372 td->td_priority | PCATCH, "lthr", hz); 373 if (td->td_flags & TDF_THRWAKEUP) { 374 mtx_lock_spin(&sched_lock); 375 td->td_flags &= ~TDF_THRWAKEUP; 376 mtx_unlock_spin(&sched_lock); 377 PROC_UNLOCK(td->td_proc); 378 return (0); 379 } 380 
PROC_UNLOCK(td->td_proc); 381 if (error == EWOULDBLOCK) 382 error = ETIMEDOUT; 383 else if (error == ERESTART) { 384 if (hz != 0) 385 error = EINTR; 386 } 387 return (error); 388} 389 390int 391thr_wake(struct thread *td, struct thr_wake_args *uap) 392 /* long id */ 393{ 394 struct proc *p; 395 struct thread *ttd; 396 397 p = td->td_proc; 398 PROC_LOCK(p); 399 ttd = thread_find(p, uap->id); 400 if (ttd == NULL) { 401 PROC_UNLOCK(p); 402 return (ESRCH); 403 } 404 mtx_lock_spin(&sched_lock); 405 ttd->td_flags |= TDF_THRWAKEUP; 406 mtx_unlock_spin(&sched_lock); 407 wakeup((void *)ttd); 408 PROC_UNLOCK(p); 409 return (0); 410} 411 412int 413thr_set_name(struct thread *td, struct thr_set_name_args *uap) 414{ 415 struct proc *p = td->td_proc; 416 char name[MAXCOMLEN + 1]; 417 struct thread *ttd; 418 int error; 419 420 error = 0; 421 name[0] = '\0'; 422 if (uap->name != NULL) { 423 error = copyinstr(uap->name, name, sizeof(name), 424 NULL); 425 if (error) 426 return (error); 427 } 428 PROC_LOCK(p); 429 if (uap->id == td->td_tid) 430 ttd = td; 431 else 432 ttd = thread_find(p, uap->id); 433 if (ttd != NULL) 434 strcpy(ttd->td_name, name); 435 else 436 error = ESRCH; 437 PROC_UNLOCK(p); 438 return (error); 439} 440