/*-
 * Copyright (c) 2003, Jeffrey Roberson <jeff@freebsd.org>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice unmodified, this list of conditions, and the following
 *    disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD: stable/11/sys/kern/kern_thr.c 315949 2017-03-25 13:33:23Z badger $");

#include "opt_compat.h"
#include "opt_posix.h"
#include <sys/param.h>
#include <sys/kernel.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/priv.h>
#include <sys/proc.h>
#include <sys/posix4.h>
#include <sys/ptrace.h>
#include <sys/racct.h>
#include <sys/resourcevar.h>
#include <sys/rwlock.h>
#include <sys/sched.h>
#include <sys/sysctl.h>
#include <sys/smp.h>
#include <sys/syscallsubr.h>
#include <sys/sysent.h>
#include <sys/systm.h>
#include <sys/sysproto.h>
#include <sys/signalvar.h>
#include <sys/ucontext.h>
#include <sys/thr.h>
#include <sys/rtprio.h>
#include <sys/umtx.h>
#include <sys/limits.h>

#include <vm/vm_domain.h>

#include <machine/frame.h>

#include <security/audit/audit.h>

static SYSCTL_NODE(_kern, OID_AUTO, threads, CTLFLAG_RW, 0,
    "thread allocation");

static int max_threads_per_proc = 1500;
SYSCTL_INT(_kern_threads, OID_AUTO, max_threads_per_proc, CTLFLAG_RW,
    &max_threads_per_proc, 0, "Limit on threads per proc");

static int max_threads_hits;
SYSCTL_INT(_kern_threads, OID_AUTO, max_threads_hits, CTLFLAG_RD,
    &max_threads_hits, 0, "kern.threads.max_threads_per_proc hit count");

#ifdef COMPAT_FREEBSD32

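/*
 * Store a thread ID at a user address, using the store width that
 * matches the current process's ABI: a full word for native (SV_LP64)
 * processes and a 32-bit word for 32-bit compat processes.
 */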
static inline int
suword_lwpid(void *addr, lwpid_t lwpid)
{
	int error;

	if (SV_CURPROC_FLAG(SV_LP64))
		error = suword(addr, lwpid);
	else
		error = suword32(addr, lwpid);
	return (error);
}

#else
#define suword_lwpid	suword
#endif

/*
 * System call interface.
 */
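
/*
 * These syscalls are the kernel half of the 1:1 threading model; on
 * FreeBSD they are normally reached through libthr rather than called
 * directly.  As a rough sketch (error handling omitted; entry, stk,
 * tls, and the sizes are placeholder names for caller-supplied
 * resources), creating a thread with thr_new(2) looks like:
 *
 *	struct thr_param p;
 *
 *	bzero(&p, sizeof(p));
 *	p.start_func = entry;		(thread entry point)
 *	p.arg = entry_arg;		(argument passed to it)
 *	p.stack_base = stk;		(caller-allocated stack)
 *	p.stack_size = stksz;
 *	p.tls_base = tls;		(TLS base for the new thread)
 *	p.child_tid = &child_id;	(TID stored for the child)
 *	p.parent_tid = &parent_id;	(TID stored for the parent)
 *	error = thr_new(&p, sizeof(p));
 */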

struct thr_create_initthr_args {
	ucontext_t ctx;
	long *tid;
};

static int
thr_create_initthr(struct thread *td, void *thunk)
{
	struct thr_create_initthr_args *args;

	/* Copy out the child's TID. */
	args = thunk;
	if (args->tid != NULL && suword_lwpid(args->tid, td->td_tid))
		return (EFAULT);

	return (set_mcontext(td, &args->ctx.uc_mcontext));
}

int
sys_thr_create(struct thread *td, struct thr_create_args *uap)
    /* ucontext_t *ctx, long *id, int flags */
{
	struct thr_create_initthr_args args;
	int error;

	if ((error = copyin(uap->ctx, &args.ctx, sizeof(args.ctx))))
		return (error);
	args.tid = uap->id;
	return (thread_create(td, NULL, thr_create_initthr, &args));
}

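/*
 * thr_new(2) takes the thr_param structure by pointer and size rather
 * than by fixed layout, so that binaries built against a smaller
 * version of the structure keep working: the kernel zero-fills the
 * whole structure before copying in only param_size bytes.
 */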
int
sys_thr_new(struct thread *td, struct thr_new_args *uap)
    /* struct thr_param * */
{
	struct thr_param param;
	int error;

	if (uap->param_size < 0 || uap->param_size > sizeof(param))
		return (EINVAL);
	bzero(&param, sizeof(param));
	if ((error = copyin(uap->param, &param, uap->param_size)))
		return (error);
	return (kern_thr_new(td, &param));
}

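/*
 * Initialization callback for thr_new(2).  This runs in the creating
 * thread, before the new thread is made runnable, and seeds the new
 * thread's TID copies, user stack, entry point, and TLS.
 */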
static int
thr_new_initthr(struct thread *td, void *thunk)
{
	stack_t stack;
	struct thr_param *param;

	/*
	 * Copy the TID out to two places, one for the child and one
	 * for the parent.  Because pthreads may be created detached,
	 * a parent that wants to access the child's TID safely must
	 * provide its own storage: the child may exit quickly, and
	 * the memory holding the child's copy may be freed before the
	 * parent can read it.
	 */
	param = thunk;
	if ((param->child_tid != NULL &&
	    suword_lwpid(param->child_tid, td->td_tid)) ||
	    (param->parent_tid != NULL &&
	    suword_lwpid(param->parent_tid, td->td_tid)))
		return (EFAULT);

	/* Set up our machine context. */
	stack.ss_sp = param->stack_base;
	stack.ss_size = param->stack_size;
	/* Set the upcall address to the user thread entry function. */
	cpu_set_upcall(td, param->start_func, param->arg, &stack);
	/* Set up the user TLS address and the TLS pointer register. */
	return (cpu_set_user_tls(td, param->tls_base));
}

int
kern_thr_new(struct thread *td, struct thr_param *param)
{
	struct rtprio rtp, *rtpp;
	int error;

	rtpp = NULL;
	if (param->rtp != 0) {
		error = copyin(param->rtp, &rtp, sizeof(struct rtprio));
		if (error)
			return (error);
		rtpp = &rtp;
	}
	return (thread_create(td, rtpp, thr_new_initthr, param));
}

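/*
 * Common back end for thr_create(2) and thr_new(2): validate any
 * requested scheduling parameters, charge the thread against the
 * RACCT resource limits, allocate and initialize the new thread via
 * the supplied callback, link it into the process, and finally make
 * it runnable.
 */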
int
thread_create(struct thread *td, struct rtprio *rtp,
    int (*initialize_thread)(struct thread *, void *), void *thunk)
{
	struct thread *newtd;
	struct proc *p;
	int error;

	p = td->td_proc;

	if (rtp != NULL) {
		switch (rtp->type) {
		case RTP_PRIO_REALTIME:
		case RTP_PRIO_FIFO:
			/* Only root can set the scheduler policy. */
			if (priv_check(td, PRIV_SCHED_SETPOLICY) != 0)
				return (EPERM);
			if (rtp->prio > RTP_PRIO_MAX)
				return (EINVAL);
			break;
		case RTP_PRIO_NORMAL:
			rtp->prio = 0;
			break;
		default:
			return (EINVAL);
		}
	}

#ifdef RACCT
	if (racct_enable) {
		PROC_LOCK(p);
		error = racct_add(p, RACCT_NTHR, 1);
		PROC_UNLOCK(p);
		if (error != 0)
			return (EPROCLIM);
	}
#endif

	/* Initialize our new td. */
	error = kern_thr_alloc(p, 0, &newtd);
	if (error)
		goto fail;

	cpu_copy_thread(newtd, td);

	bzero(&newtd->td_startzero,
	    __rangeof(struct thread, td_startzero, td_endzero));
	newtd->td_sleeptimo = 0;
	bcopy(&td->td_startcopy, &newtd->td_startcopy,
	    __rangeof(struct thread, td_startcopy, td_endcopy));
	newtd->td_proc = td->td_proc;
	newtd->td_rb_list = newtd->td_rbp_list = newtd->td_rb_inact = 0;
	thread_cow_get(newtd, td);

	error = initialize_thread(newtd, thunk);
	if (error != 0) {
		thread_cow_free(newtd);
		thread_free(newtd);
		goto fail;
	}

	PROC_LOCK(p);
	p->p_flag |= P_HADTHREADS;
	thread_link(newtd, p);
	bcopy(p->p_comm, newtd->td_name, sizeof(newtd->td_name));
	thread_lock(td);
	/* Let the scheduler know about these things. */
	sched_fork_thread(td, newtd);
	thread_unlock(td);
	if (P_SHOULDSTOP(p))
		newtd->td_flags |= TDF_ASTPENDING | TDF_NEEDSUSPCHK;
	if (p->p_ptevents & PTRACE_LWP)
		newtd->td_dbgflags |= TDB_BORN;

	/*
	 * Copy the existing thread VM policy into the new thread.
	 */
	vm_domain_policy_localcopy(&newtd->td_vm_dom_policy,
	    &td->td_vm_dom_policy);

	PROC_UNLOCK(p);

	tidhash_add(newtd);

	thread_lock(newtd);
	if (rtp != NULL) {
		if (!(td->td_pri_class == PRI_TIMESHARE &&
		      rtp->type == RTP_PRIO_NORMAL)) {
			rtp_to_pri(rtp, newtd);
			sched_prio(newtd, newtd->td_user_pri);
		} /* Ignore the timesharing class. */
	}
	TD_SET_CAN_RUN(newtd);
	sched_add(newtd, SRQ_BORING);
	thread_unlock(newtd);

	return (0);

fail:
#ifdef RACCT
	if (racct_enable) {
		PROC_LOCK(p);
		racct_sub(p, RACCT_NTHR, 1);
		PROC_UNLOCK(p);
	}
#endif
	return (error);
}

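/*
 * Store the calling thread's ID at the user address given by uap->id.
 */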
int
sys_thr_self(struct thread *td, struct thr_self_args *uap)
    /* long *id */
{
	int error;

	error = suword_lwpid(uap->id, (unsigned)td->td_tid);
	if (error == -1)
		return (EFAULT);
	return (0);
}

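/*
 * Terminate the calling thread.  If userland passed a state word, the
 * kernel writes 1 to it and posts a umtx wakeup on its address, which
 * lets a joining thread know that the exiting thread's stack may be
 * reclaimed.
 */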
int
sys_thr_exit(struct thread *td, struct thr_exit_args *uap)
    /* long *state */
{

	umtx_thread_exit(td);

	/* Signal userland that it can free the stack. */
	if ((void *)uap->state != NULL) {
		suword_lwpid(uap->state, 1);
		kern_umtx_wake(td, uap->state, INT_MAX, 0);
	}

	return (kern_thr_exit(td));
}

int
kern_thr_exit(struct thread *td)
{
	struct proc *p;

	p = td->td_proc;

	/*
	 * If all of the threads in a process call this routine to
	 * exit (e.g. all threads call pthread_exit()), exactly one
	 * thread should return to the caller to terminate the process
	 * instead of the thread.
	 *
	 * Checking p_numthreads alone is not sufficient since threads
	 * might be committed to terminating while the PROC_LOCK is
	 * dropped in either ptracestop() or while removing this thread
	 * from the tidhash.  Instead, the p_pendingexits field holds
	 * the count of threads in either of those states and a thread
	 * is considered the "last" thread if all of the other threads
	 * in a process are already terminating.
	 */
	PROC_LOCK(p);
	if (p->p_numthreads == p->p_pendingexits + 1) {
		/*
		 * Ignore attempts to shut down the last thread in the
		 * proc.  This will actually call _exit(2) in the
		 * usermode trampoline when it returns.
		 */
		PROC_UNLOCK(p);
		return (0);
	}

	p->p_pendingexits++;
	td->td_dbgflags |= TDB_EXIT;
	if (p->p_ptevents & PTRACE_LWP)
		ptracestop(td, SIGTRAP, NULL);
	PROC_UNLOCK(p);
	tidhash_remove(td);
	PROC_LOCK(p);
	p->p_pendingexits--;

	/*
	 * The check above should prevent all other threads from this
	 * process from exiting while the PROC_LOCK is dropped, so
	 * there must be at least one thread other than the current
	 * thread.
	 */
	KASSERT(p->p_numthreads > 1, ("too few threads"));
	racct_sub(p, RACCT_NTHR, 1);
	tdsigcleanup(td);
	PROC_SLOCK(p);
	thread_stopped(p);
	thread_exit();
	/* NOTREACHED */
}

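/*
 * Send a signal to a thread in the current process.  An id of -1
 * broadcasts the signal to every other thread in the process, and a
 * sig of 0 performs only the existence check without delivering
 * anything.
 */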
int
sys_thr_kill(struct thread *td, struct thr_kill_args *uap)
    /* long id, int sig */
{
	ksiginfo_t ksi;
	struct thread *ttd;
	struct proc *p;
	int error;

	p = td->td_proc;
	ksiginfo_init(&ksi);
	ksi.ksi_signo = uap->sig;
	ksi.ksi_code = SI_LWP;
	ksi.ksi_pid = p->p_pid;
	ksi.ksi_uid = td->td_ucred->cr_ruid;
	if (uap->id == -1) {
		if (uap->sig != 0 && !_SIG_VALID(uap->sig)) {
			error = EINVAL;
		} else {
			error = ESRCH;
			PROC_LOCK(p);
			FOREACH_THREAD_IN_PROC(p, ttd) {
				if (ttd != td) {
					error = 0;
					if (uap->sig == 0)
						break;
					tdksignal(ttd, uap->sig, &ksi);
				}
			}
			PROC_UNLOCK(p);
		}
	} else {
		error = 0;
		ttd = tdfind((lwpid_t)uap->id, p->p_pid);
		if (ttd == NULL)
			return (ESRCH);
		if (uap->sig == 0)
			;
		else if (!_SIG_VALID(uap->sig))
			error = EINVAL;
		else
			tdksignal(ttd, uap->sig, &ksi);
		PROC_UNLOCK(ttd->td_proc);
	}
	return (error);
}

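/*
 * Like thr_kill(2), but the target thread may live in another
 * process, so delivery is subject to a p_cansignal() permission
 * check against that process.
 */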
int
sys_thr_kill2(struct thread *td, struct thr_kill2_args *uap)
    /* pid_t pid, long id, int sig */
{
	ksiginfo_t ksi;
	struct thread *ttd;
	struct proc *p;
	int error;

	AUDIT_ARG_SIGNUM(uap->sig);

	ksiginfo_init(&ksi);
	ksi.ksi_signo = uap->sig;
	ksi.ksi_code = SI_LWP;
	ksi.ksi_pid = td->td_proc->p_pid;
	ksi.ksi_uid = td->td_ucred->cr_ruid;
	if (uap->id == -1) {
		if ((p = pfind(uap->pid)) == NULL)
			return (ESRCH);
		AUDIT_ARG_PROCESS(p);
		error = p_cansignal(td, p, uap->sig);
		if (error) {
			PROC_UNLOCK(p);
			return (error);
		}
		if (uap->sig != 0 && !_SIG_VALID(uap->sig)) {
			error = EINVAL;
		} else {
			error = ESRCH;
			FOREACH_THREAD_IN_PROC(p, ttd) {
				if (ttd != td) {
					error = 0;
					if (uap->sig == 0)
						break;
					tdksignal(ttd, uap->sig, &ksi);
				}
			}
		}
		PROC_UNLOCK(p);
	} else {
		ttd = tdfind((lwpid_t)uap->id, uap->pid);
		if (ttd == NULL)
			return (ESRCH);
		p = ttd->td_proc;
		AUDIT_ARG_PROCESS(p);
		error = p_cansignal(td, p, uap->sig);
		if (error == 0) {
			if (uap->sig == 0)
				;
			else if (!_SIG_VALID(uap->sig))
				error = EINVAL;
			else
				tdksignal(ttd, uap->sig, &ksi);
		}
		PROC_UNLOCK(p);
	}
	return (error);
}

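/*
 * Suspend the calling thread until it is awoken by thr_wake(2),
 * interrupted by a signal, or the optional timeout expires.  A zero
 * timeout acts as a poll and fails with ETIMEDOUT unless a wakeup is
 * already pending.
 */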
int
sys_thr_suspend(struct thread *td, struct thr_suspend_args *uap)
	/* const struct timespec *timeout */
{
	struct timespec ts, *tsp;
	int error;

	tsp = NULL;
	if (uap->timeout != NULL) {
		error = umtx_copyin_timeout(uap->timeout, &ts);
		if (error != 0)
			return (error);
		tsp = &ts;
	}

	return (kern_thr_suspend(td, tsp));
}

int
kern_thr_suspend(struct thread *td, struct timespec *tsp)
{
	struct proc *p = td->td_proc;
	struct timeval tv;
	int error = 0;
	int timo = 0;

	if (td->td_pflags & TDP_WAKEUP) {
		td->td_pflags &= ~TDP_WAKEUP;
		return (0);
	}

	if (tsp != NULL) {
		if (tsp->tv_sec == 0 && tsp->tv_nsec == 0)
			error = EWOULDBLOCK;
		else {
			TIMESPEC_TO_TIMEVAL(&tv, tsp);
			timo = tvtohz(&tv);
		}
	}

	PROC_LOCK(p);
	if (error == 0 && (td->td_flags & TDF_THRWAKEUP) == 0)
		error = msleep((void *)td, &p->p_mtx,
			 PCATCH, "lthr", timo);

	if (td->td_flags & TDF_THRWAKEUP) {
		thread_lock(td);
		td->td_flags &= ~TDF_THRWAKEUP;
		thread_unlock(td);
		PROC_UNLOCK(p);
		return (0);
	}
	PROC_UNLOCK(p);
	if (error == EWOULDBLOCK)
		error = ETIMEDOUT;
	else if (error == ERESTART) {
		if (timo != 0)
			error = EINTR;
	}
	return (error);
}

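/*
 * Wake a thread suspended in thr_suspend(2).  Waking yourself just
 * sets TDP_WAKEUP, which the next thr_suspend(2) call consumes;
 * waking another thread sets TDF_THRWAKEUP and kicks its sleep
 * channel.  Note that tdfind() returns with the process locked.
 */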
int
sys_thr_wake(struct thread *td, struct thr_wake_args *uap)
	/* long id */
{
	struct proc *p;
	struct thread *ttd;

	if (uap->id == td->td_tid) {
		td->td_pflags |= TDP_WAKEUP;
		return (0);
	}

	p = td->td_proc;
	ttd = tdfind((lwpid_t)uap->id, p->p_pid);
	if (ttd == NULL)
		return (ESRCH);
	thread_lock(ttd);
	ttd->td_flags |= TDF_THRWAKEUP;
	thread_unlock(ttd);
	wakeup((void *)ttd);
	PROC_UNLOCK(p);
	return (0);
}

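/*
 * Set the target thread's name, silently truncating it to fit in
 * td_name (MAXCOMLEN + 1 bytes including the NUL).
 */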
int
sys_thr_set_name(struct thread *td, struct thr_set_name_args *uap)
{
	struct proc *p;
	char name[MAXCOMLEN + 1];
	struct thread *ttd;
	int error;

	error = 0;
	name[0] = '\0';
	if (uap->name != NULL) {
		error = copyinstr(uap->name, name, sizeof(name), NULL);
		if (error == ENAMETOOLONG) {
			error = copyin(uap->name, name, sizeof(name) - 1);
			name[sizeof(name) - 1] = '\0';
		}
		if (error)
			return (error);
	}
	p = td->td_proc;
	ttd = tdfind((lwpid_t)uap->id, p->p_pid);
	if (ttd == NULL)
		return (ESRCH);
	strcpy(ttd->td_name, name);
#ifdef KTR
	sched_clear_tdname(ttd);
#endif
	PROC_UNLOCK(p);
	return (error);
}

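/*
 * Allocate a thread structure for the process, enforcing the
 * kern.threads.max_threads_per_proc limit.
 */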
int
kern_thr_alloc(struct proc *p, int pages, struct thread **ntd)
{

	/* The check is racy, but it is cheap. */
	if (p->p_numthreads >= max_threads_per_proc) {
		++max_threads_hits;
		return (EPROCLIM);
	}

	*ntd = thread_alloc(pages);
	if (*ntd == NULL)
		return (ENOMEM);

	return (0);
}
