/*	$NetBSD$	*/

/*-
 * Copyright (c) 2001, 2006, 2007, 2008 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Nathan J. Williams, and Andrew Doran.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * Lightweight process (LWP) system calls.  See kern_lwp.c for a description
 * of LWPs.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD$");

#include "opt_sa.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/pool.h>
#include <sys/proc.h>
#include <sys/types.h>
#include <sys/syscallargs.h>
#include <sys/kauth.h>
#include <sys/kmem.h>
#include <sys/sleepq.h>
#include <sys/lwpctl.h>
#include <sys/cpu.h>

#include <uvm/uvm_extern.h>

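/*
 * Bound on the number of LWPs that a single _lwp_unpark_all() call
 * will process; callers with more targets are expected to loop.
 */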
#define	LWP_UNPARK_MAX		1024

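/*
 * Sync object for LWPs blocked in _lwp_park().  LIFO queue ordering
 * favours recently-parked (cache-warm) LWPs when unparking.
 */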
static syncobj_t lwp_park_sobj = {
	SOBJ_SLEEPQ_LIFO,
	sleepq_unsleep,
	sleepq_changepri,
	sleepq_lendpri,
	syncobj_noowner,
};

static sleeptab_t	lwp_park_tab;

void
lwp_sys_init(void)
{
	sleeptab_init(&lwp_park_tab);
}

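/*
 * Create a new LWP in the calling LWP's process, passing 'arg' to the
 * emulation's start routine.  On success the new LWP's ID is stored in
 * *new_lwp, and the LWP is set running unless LWP_SUSPENDED was
 * requested or the process is stopping.
 */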
int
do_lwp_create(lwp_t *l, void *arg, u_long flags, lwpid_t *new_lwp)
{
	struct proc *p = l->l_proc;
	struct lwp *l2;
	struct schedstate_percpu *spc;
	vaddr_t uaddr;
	int error;

#ifdef KERN_SA
	mutex_enter(p->p_lock);
	if ((p->p_sflag & (PS_SA | PS_WEXIT)) != 0 || p->p_sa != NULL) {
		mutex_exit(p->p_lock);
		return EINVAL;
	}
	mutex_exit(p->p_lock);
#endif

	/* XXX check against resource limits */

	uaddr = uvm_uarea_alloc();
	if (__predict_false(uaddr == 0))
		return ENOMEM;

	error = lwp_create(l, p, uaddr, flags & LWP_DETACHED,
	    NULL, 0, p->p_emul->e_startlwp, arg, &l2, l->l_class);
	if (__predict_false(error)) {
		uvm_uarea_free(uaddr);
		return error;
	}

	*new_lwp = l2->l_lid;

	/*
	 * Set the new LWP running, unless the caller has requested that
	 * it be created in suspended state.  If the process is stopping,
	 * then the LWP is created stopped.
	 */
	mutex_enter(p->p_lock);
	lwp_lock(l2);
	spc = &l2->l_cpu->ci_schedstate;
	if ((flags & LWP_SUSPENDED) == 0 &&
	    (l->l_flag & (LW_WREBOOT | LW_WSUSPEND | LW_WEXIT)) == 0) {
		if (p->p_stat == SSTOP || (p->p_sflag & PS_STOPPING) != 0) {
			KASSERT(l2->l_wchan == NULL);
			l2->l_stat = LSSTOP;
			p->p_nrlwps--;
			lwp_unlock_to(l2, spc->spc_lwplock);
		} else {
			KASSERT(lwp_locked(l2, spc->spc_mutex));
			l2->l_stat = LSRUN;
			sched_enqueue(l2, false);
			lwp_unlock(l2);
		}
	} else {
		l2->l_stat = LSSUSPENDED;
		p->p_nrlwps--;
		lwp_unlock_to(l2, spc->spc_lwplock);
	}
	mutex_exit(p->p_lock);

	return 0;
}

int
sys__lwp_create(struct lwp *l, const struct sys__lwp_create_args *uap,
    register_t *retval)
{
	/* {
		syscallarg(const ucontext_t *) ucp;
		syscallarg(u_long) flags;
		syscallarg(lwpid_t *) new_lwp;
	} */
	struct proc *p = l->l_proc;
	ucontext_t *newuc = NULL;
	lwpid_t lid;
	int error;

	newuc = kmem_alloc(sizeof(ucontext_t), KM_SLEEP);
	error = copyin(SCARG(uap, ucp), newuc, p->p_emul->e_ucsize);
	if (error)
		goto fail;

	/* validate the ucontext */
	if ((newuc->uc_flags & _UC_CPU) == 0) {
		error = EINVAL;
		goto fail;
	}
	error = cpu_mcontext_validate(l, &newuc->uc_mcontext);
	if (error)
		goto fail;

	error = do_lwp_create(l, newuc, SCARG(uap, flags), &lid);
	if (error)
		goto fail;

	/*
	 * Do not free the ucontext on error past this point: the new
	 * LWP is already running and will access it.
	 */
	return copyout(&lid, SCARG(uap, new_lwp), sizeof(lid));

fail:
	kmem_free(newuc, sizeof(ucontext_t));
	return error;
}

int
sys__lwp_exit(struct lwp *l, const void *v, register_t *retval)
{

	lwp_exit(l);
	return 0;
}

int
sys__lwp_self(struct lwp *l, const void *v, register_t *retval)
{

	*retval = l->l_lid;
	return 0;
}

int
sys__lwp_getprivate(struct lwp *l, const void *v, register_t *retval)
{

	*retval = (uintptr_t)l->l_private;
	return 0;
}

int
sys__lwp_setprivate(struct lwp *l, const struct sys__lwp_setprivate_args *uap,
    register_t *retval)
{
	/* {
		syscallarg(void *) ptr;
	} */

	return lwp_setprivate(l, SCARG(uap, ptr));
}

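/*
 * Suspend an LWP in the current process, then wait for the suspension
 * to take effect (or for the target to exit, or for the wait to be
 * interrupted).
 */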
int
sys__lwp_suspend(struct lwp *l, const struct sys__lwp_suspend_args *uap,
    register_t *retval)
{
	/* {
		syscallarg(lwpid_t) target;
	} */
	struct proc *p = l->l_proc;
	struct lwp *t;
	int error;

	mutex_enter(p->p_lock);

#ifdef KERN_SA
	if ((p->p_sflag & PS_SA) != 0 || p->p_sa != NULL) {
		mutex_exit(p->p_lock);
		return EINVAL;
	}
#endif

	if ((t = lwp_find(p, SCARG(uap, target))) == NULL) {
		mutex_exit(p->p_lock);
		return ESRCH;
	}

	/*
	 * Check for deadlock, which is only possible when we're suspending
	 * ourself.  XXX There is a short race here, as p_nrlwps is only
	 * decremented when an LWP suspends itself on the kernel/user
	 * boundary.  It's still possible to kill -9 the process so we
	 * don't bother checking further.
	 */
	lwp_lock(t);
	if ((t == l && p->p_nrlwps == 1) ||
	    (l->l_flag & (LW_WCORE | LW_WEXIT)) != 0) {
		lwp_unlock(t);
		mutex_exit(p->p_lock);
		return EDEADLK;
	}

	/*
	 * Suspend the LWP.  XXX If it's on a different CPU, we should wait
	 * for it to be preempted, where it will put itself to sleep.
	 *
	 * Suspension of the current LWP will happen on return to userspace.
	 */
	error = lwp_suspend(l, t);
	if (error) {
		mutex_exit(p->p_lock);
		return error;
	}

	/*
	 * Wait for:
	 *  o process exiting
	 *  o target LWP suspended
	 *  o target LWP not suspended and LW_WSUSPEND clear
	 *  o target LWP exited
	 */
	for (;;) {
		error = cv_wait_sig(&p->p_lwpcv, p->p_lock);
		if (error) {
			error = ERESTART;
			break;
		}
		if (lwp_find(p, SCARG(uap, target)) == NULL) {
			error = ESRCH;
			break;
		}
		if ((l->l_flag | t->l_flag) & (LW_WCORE | LW_WEXIT)) {
			error = ERESTART;
			break;
		}
		if (t->l_stat == LSSUSPENDED ||
		    (t->l_flag & LW_WSUSPEND) == 0)
			break;
	}
	mutex_exit(p->p_lock);

	return error;
}

int
sys__lwp_continue(struct lwp *l, const struct sys__lwp_continue_args *uap,
    register_t *retval)
{
	/* {
		syscallarg(lwpid_t) target;
	} */
	int error;
	struct proc *p = l->l_proc;
	struct lwp *t;

	error = 0;

	mutex_enter(p->p_lock);
	if ((t = lwp_find(p, SCARG(uap, target))) == NULL) {
		mutex_exit(p->p_lock);
		return ESRCH;
	}

	lwp_lock(t);
	lwp_continue(t);
	mutex_exit(p->p_lock);

	return error;
}

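/*
 * Wake a sleeping LWP.  Fails with ENODEV if the target is not asleep,
 * or with EBUSY if it is in an uninterruptible sleep.
 */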
int
sys__lwp_wakeup(struct lwp *l, const struct sys__lwp_wakeup_args *uap,
    register_t *retval)
{
	/* {
		syscallarg(lwpid_t) target;
	} */
	struct lwp *t;
	struct proc *p;
	int error;

	p = l->l_proc;
	mutex_enter(p->p_lock);

	if ((t = lwp_find(p, SCARG(uap, target))) == NULL) {
		mutex_exit(p->p_lock);
		return ESRCH;
	}

	lwp_lock(t);
	t->l_flag |= (LW_CANCELLED | LW_UNPARKED);

	if (t->l_stat != LSSLEEP) {
		lwp_unlock(t);
		error = ENODEV;
	} else if ((t->l_flag & LW_SINTR) == 0) {
		lwp_unlock(t);
		error = EBUSY;
	} else {
		/* Wake it up.  lwp_unsleep() will release the LWP lock. */
		lwp_unsleep(t, true);
		error = 0;
	}

	mutex_exit(p->p_lock);

	return error;
}

int
sys__lwp_wait(struct lwp *l, const struct sys__lwp_wait_args *uap,
    register_t *retval)
{
	/* {
		syscallarg(lwpid_t) wait_for;
		syscallarg(lwpid_t *) departed;
	} */
	struct proc *p = l->l_proc;
	int error;
	lwpid_t dep;

	mutex_enter(p->p_lock);
	error = lwp_wait(l, SCARG(uap, wait_for), &dep, false);
	mutex_exit(p->p_lock);

	if (!error && SCARG(uap, departed)) {
		error = copyout(&dep, SCARG(uap, departed), sizeof(dep));
	}

	return error;
}

int
sys__lwp_kill(struct lwp *l, const struct sys__lwp_kill_args *uap,
    register_t *retval)
{
	/* {
		syscallarg(lwpid_t)	target;
		syscallarg(int)		signo;
	} */
	struct proc *p = l->l_proc;
	struct lwp *t;
	ksiginfo_t ksi;
	int signo = SCARG(uap, signo);
	int error = 0;

	if ((u_int)signo >= NSIG)
		return EINVAL;

	KSI_INIT(&ksi);
	ksi.ksi_signo = signo;
	ksi.ksi_code = SI_LWP;
	ksi.ksi_pid = p->p_pid;
	ksi.ksi_uid = kauth_cred_geteuid(l->l_cred);
	ksi.ksi_lid = SCARG(uap, target);

	mutex_enter(proc_lock);
	mutex_enter(p->p_lock);
	if ((t = lwp_find(p, ksi.ksi_lid)) == NULL)
		error = ESRCH;
	else if (signo != 0)
		kpsignal2(p, &ksi);
	mutex_exit(p->p_lock);
	mutex_exit(proc_lock);

	return error;
}

int
sys__lwp_detach(struct lwp *l, const struct sys__lwp_detach_args *uap,
    register_t *retval)
{
	/* {
		syscallarg(lwpid_t)	target;
	} */
	struct proc *p;
	struct lwp *t;
	lwpid_t target;
	int error;

	target = SCARG(uap, target);
	p = l->l_proc;

	mutex_enter(p->p_lock);

	if (l->l_lid == target)
		t = l;
	else {
		/*
		 * We can't use lwp_find() here because the target might
		 * be a zombie.
		 */
		LIST_FOREACH(t, &p->p_lwps, l_sibling)
			if (t->l_lid == target)
				break;
	}

	/*
	 * If the LWP is already detached, there's nothing to do.
	 * If it's a zombie, we need to clean up after it.  LSZOMB
	 * is visible with the proc mutex held.
	 *
	 * After we have detached or released the LWP, kick any
	 * other LWPs that may be sitting in _lwp_wait(), waiting
	 * for the target LWP to exit.
	 */
	if (t != NULL && t->l_stat != LSIDL) {
		if ((t->l_prflag & LPR_DETACHED) == 0) {
			p->p_ndlwps++;
			t->l_prflag |= LPR_DETACHED;
			if (t->l_stat == LSZOMB) {
				/* Releases proc mutex. */
				lwp_free(t, false, false);
				return 0;
			}
			error = 0;

			/*
			 * Have any LWPs sleeping in lwp_wait() recheck
			 * for deadlock.
			 */
			cv_broadcast(&p->p_lwpcv);
		} else
			error = EINVAL;
	} else
		error = ESRCH;

	mutex_exit(p->p_lock);

	return error;
}

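/*
 * Compute the wait channel for a park operation: the XOR of the
 * process pointer and the user-supplied hint.  This keeps park/unpark
 * pairs that use the same hint in different processes on distinct
 * channels.
 */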
static inline wchan_t
lwp_park_wchan(struct proc *p, const void *hint)
{

	return (wchan_t)((uintptr_t)p ^ (uintptr_t)hint);
}

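/*
 * Unpark a single LWP in the current process.  Fast path: pull the
 * target directly off the park sleep queue.  Slow path: record the
 * unpark as pending so the target's next _lwp_park() returns at once.
 */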
int
lwp_unpark(lwpid_t target, const void *hint)
{
	sleepq_t *sq;
	wchan_t wchan;
	kmutex_t *mp;
	proc_t *p;
	lwp_t *t;

	/*
	 * Easy case: search for the LWP on the sleep queue.  If
	 * it's parked, remove it from the queue and set running.
	 */
	p = curproc;
	wchan = lwp_park_wchan(p, hint);
	sq = sleeptab_lookup(&lwp_park_tab, wchan, &mp);

	TAILQ_FOREACH(t, sq, l_sleepchain)
		if (t->l_proc == p && t->l_lid == target)
			break;

	if (__predict_true(t != NULL)) {
		sleepq_remove(sq, t);
		mutex_spin_exit(mp);
		return 0;
	}

	/*
	 * The LWP hasn't parked yet.  Take the hit and mark the
	 * operation as pending.
	 */
	mutex_spin_exit(mp);

	mutex_enter(p->p_lock);
	if ((t = lwp_find(p, target)) == NULL) {
		mutex_exit(p->p_lock);
		return ESRCH;
	}

	/*
	 * It may not have parked yet, we may have raced, or it may
	 * be parked on a different user sync object.
	 */
	lwp_lock(t);
	if (t->l_syncobj == &lwp_park_sobj) {
		/* Releases the LWP lock. */
		lwp_unsleep(t, true);
	} else {
		/*
		 * Set the operation pending.  The next call to _lwp_park
		 * will return early.
		 */
		t->l_flag |= LW_UNPARKED;
		lwp_unlock(t);
	}

	mutex_exit(p->p_lock);
	return 0;
}

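/*
 * Park the calling LWP until unparked or until the absolute timeout
 * 'ts' expires.  If an unpark is already pending, consume it and
 * return EALREADY without sleeping.
 */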
int
lwp_park(struct timespec *ts, const void *hint)
{
	sleepq_t *sq;
	kmutex_t *mp;
	wchan_t wchan;
	int timo, error;
	lwp_t *l;

	/* Fix up the given timeout value. */
	if (ts != NULL) {
		error = abstimeout2timo(ts, &timo);
		if (error) {
			return error;
		}
		KASSERT(timo != 0);
	} else {
		timo = 0;
	}

	/* Find and lock the sleep queue. */
	l = curlwp;
	wchan = lwp_park_wchan(l->l_proc, hint);
	sq = sleeptab_lookup(&lwp_park_tab, wchan, &mp);

	/*
	 * Before going the full route and blocking, check to see if an
	 * unpark op is pending.
	 */
	lwp_lock(l);
	if ((l->l_flag & (LW_CANCELLED | LW_UNPARKED)) != 0) {
		l->l_flag &= ~(LW_CANCELLED | LW_UNPARKED);
		lwp_unlock(l);
		mutex_spin_exit(mp);
		return EALREADY;
	}
	lwp_unlock_to(l, mp);
	l->l_biglocks = 0;
	sleepq_enqueue(sq, wchan, "parked", &lwp_park_sobj);
	error = sleepq_block(timo, true);
	switch (error) {
	case EWOULDBLOCK:
		error = ETIMEDOUT;
		break;
	case ERESTART:
		error = EINTR;
		break;
	default:
		/* nothing */
		break;
	}
	return error;
}

/*
 * 'park' an LWP waiting on a user-level synchronisation object.  The LWP
 * will remain parked until another LWP in the same process calls in and
 * requests that it be unparked.
 */
int
sys____lwp_park50(struct lwp *l, const struct sys____lwp_park50_args *uap,
    register_t *retval)
{
	/* {
		syscallarg(const struct timespec *)	ts;
		syscallarg(lwpid_t)			unpark;
		syscallarg(const void *)		hint;
		syscallarg(const void *)		unparkhint;
	} */
	struct timespec ts, *tsp;
	int error;

	if (SCARG(uap, ts) == NULL)
		tsp = NULL;
	else {
		error = copyin(SCARG(uap, ts), &ts, sizeof(ts));
		if (error != 0)
			return error;
		tsp = &ts;
	}

	if (SCARG(uap, unpark) != 0) {
		error = lwp_unpark(SCARG(uap, unpark), SCARG(uap, unparkhint));
		if (error != 0)
			return error;
	}

	return lwp_park(tsp, SCARG(uap, hint));
}
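
/*
 * Illustrative userland usage (a sketch, not code from libpthread;
 * 'obj' and its fields are hypothetical): a waiter publishes its LWP
 * ID, re-checks the condition, and parks; a waker sets the condition
 * and unparks.  The shared object's address doubles as the hint,
 * hashing both sides onto the same sleep queue.
 *
 *	// waiter:
 *	obj->owner = _lwp_self();
 *	while (!obj->ready)
 *		_lwp_park(NULL, 0, obj, NULL);	// may return EALREADY
 *
 *	// waker:
 *	obj->ready = 1;
 *	_lwp_unpark(obj->owner, obj);
 *
 * An unpark that arrives between the waiter's condition check and its
 * park is not lost: lwp_unpark() sets LW_UNPARKED and the next park
 * returns immediately.
 */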

int
sys__lwp_unpark(struct lwp *l, const struct sys__lwp_unpark_args *uap,
    register_t *retval)
{
	/* {
		syscallarg(lwpid_t)		target;
		syscallarg(const void *)	hint;
	} */

	return lwp_unpark(SCARG(uap, target), SCARG(uap, hint));
}

int
sys__lwp_unpark_all(struct lwp *l, const struct sys__lwp_unpark_all_args *uap,
    register_t *retval)
{
	/* {
		syscallarg(const lwpid_t *)	targets;
		syscallarg(size_t)		ntargets;
		syscallarg(const void *)	hint;
	} */
	struct proc *p;
	struct lwp *t;
	sleepq_t *sq;
	wchan_t wchan;
	lwpid_t targets[32], *tp, *tpp, *tmax, target;
	int error;
	kmutex_t *mp;
	u_int ntargets;
	size_t sz;

	p = l->l_proc;
	ntargets = SCARG(uap, ntargets);

	if (SCARG(uap, targets) == NULL) {
		/*
		 * Let the caller know how much we are willing to do, and
		 * let it unpark the LWPs in blocks.
		 */
		*retval = LWP_UNPARK_MAX;
		return 0;
	}
	if (ntargets > LWP_UNPARK_MAX || ntargets == 0)
		return EINVAL;

	/*
	 * Copy in the target array.  If it's a small number of LWPs, then
	 * place the numbers on the stack.
	 */
	sz = sizeof(target) * ntargets;
	if (sz <= sizeof(targets))
		tp = targets;
	else {
		tp = kmem_alloc(sz, KM_SLEEP);
		if (tp == NULL)
			return ENOMEM;
	}
	error = copyin(SCARG(uap, targets), tp, sz);
	if (error != 0) {
		if (tp != targets) {
			kmem_free(tp, sz);
		}
		return error;
	}

	wchan = lwp_park_wchan(p, SCARG(uap, hint));
	sq = sleeptab_lookup(&lwp_park_tab, wchan, &mp);

	for (tmax = tp + ntargets, tpp = tp; tpp < tmax; tpp++) {
		target = *tpp;

		/*
		 * Easy case: search for the LWP on the sleep queue.  If
		 * it's parked, remove it from the queue and set running.
		 */
		TAILQ_FOREACH(t, sq, l_sleepchain)
			if (t->l_proc == p && t->l_lid == target)
				break;

		if (t != NULL) {
			sleepq_remove(sq, t);
			continue;
		}

		/*
		 * The LWP hasn't parked yet.  Take the hit and
		 * mark the operation as pending.
		 */
		mutex_spin_exit(mp);
		mutex_enter(p->p_lock);
		if ((t = lwp_find(p, target)) == NULL) {
			mutex_exit(p->p_lock);
			mutex_spin_enter(mp);
			continue;
		}
		lwp_lock(t);

		/*
		 * It may not have parked yet, we may have raced, or
		 * it may be parked on a different user sync object.
		 */
		if (t->l_syncobj == &lwp_park_sobj) {
			/* Releases the LWP lock. */
			lwp_unsleep(t, true);
		} else {
			/*
			 * Set the operation pending.  The next call to
			 * _lwp_park will return early.
			 */
			t->l_flag |= LW_UNPARKED;
			lwp_unlock(t);
		}

		mutex_exit(p->p_lock);
		mutex_spin_enter(mp);
	}

	mutex_spin_exit(mp);
	if (tp != targets)
		kmem_free(tp, sz);

	return 0;
}

int
sys__lwp_setname(struct lwp *l, const struct sys__lwp_setname_args *uap,
    register_t *retval)
{
	/* {
		syscallarg(lwpid_t)		target;
		syscallarg(const char *)	name;
	} */
	char *name, *oname;
	lwpid_t target;
	proc_t *p;
	lwp_t *t;
	int error;

	if ((target = SCARG(uap, target)) == 0)
		target = l->l_lid;

	name = kmem_alloc(MAXCOMLEN, KM_SLEEP);
	if (name == NULL)
		return ENOMEM;
	error = copyinstr(SCARG(uap, name), name, MAXCOMLEN, NULL);
	switch (error) {
	case ENAMETOOLONG:
	case 0:
		name[MAXCOMLEN - 1] = '\0';
		break;
	default:
		kmem_free(name, MAXCOMLEN);
		return error;
	}

	p = curproc;
	mutex_enter(p->p_lock);
	if ((t = lwp_find(p, target)) == NULL) {
		mutex_exit(p->p_lock);
		kmem_free(name, MAXCOMLEN);
		return ESRCH;
	}
	lwp_lock(t);
	oname = t->l_name;
	t->l_name = name;
	lwp_unlock(t);
	mutex_exit(p->p_lock);

	if (oname != NULL)
		kmem_free(oname, MAXCOMLEN);

	return 0;
}

int
sys__lwp_getname(struct lwp *l, const struct sys__lwp_getname_args *uap,
    register_t *retval)
{
	/* {
		syscallarg(lwpid_t)		target;
		syscallarg(char *)		name;
		syscallarg(size_t)		len;
	} */
	char name[MAXCOMLEN];
	lwpid_t target;
	proc_t *p;
	lwp_t *t;

	if ((target = SCARG(uap, target)) == 0)
		target = l->l_lid;

	p = curproc;
	mutex_enter(p->p_lock);
	if ((t = lwp_find(p, target)) == NULL) {
		mutex_exit(p->p_lock);
		return ESRCH;
	}
	lwp_lock(t);
	if (t->l_name == NULL)
		name[0] = '\0';
	else
		strcpy(name, t->l_name);
	lwp_unlock(t);
	mutex_exit(p->p_lock);

	return copyoutstr(name, SCARG(uap, name), SCARG(uap, len), NULL);
}

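/*
 * Allocate the per-LWP communication area (struct lwpctl), through
 * which the kernel exports per-LWP state such as the LWP's current
 * CPU, and return its user-space address.
 */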
int
sys__lwp_ctl(struct lwp *l, const struct sys__lwp_ctl_args *uap,
    register_t *retval)
{
	/* {
		syscallarg(int)			features;
		syscallarg(struct lwpctl **)	address;
	} */
	int error, features;
	vaddr_t vaddr;

	features = SCARG(uap, features);
	features &= ~(LWPCTL_FEATURE_CURCPU | LWPCTL_FEATURE_PCTR);
	if (features != 0)
		return ENODEV;
	if ((error = lwp_ctl_alloc(&vaddr)) != 0)
		return error;
	return copyout(&vaddr, SCARG(uap, address), sizeof(void *));
}
