/*-
 * Copyright (c) 1982, 1986, 1991, 1993
 *	The Regents of the University of California.  All rights reserved.
 * (c) UNIX System Laboratories, Inc.
 * All or some portions of this file are derived from material licensed
 * to the University of California by American Telephone and Telegraph
 * Co. or Unix System Laboratories, Inc. and are reproduced herein with
 * the permission of UNIX System Laboratories, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)kern_resource.c	8.5 (Berkeley) 1/21/94
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "opt_compat.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/sysproto.h>
#include <sys/file.h>
#include <sys/kernel.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/mutex.h>
#include <sys/priv.h>
#include <sys/proc.h>
#include <sys/racct.h>
#include <sys/refcount.h>
#include <sys/resourcevar.h>
#include <sys/rwlock.h>
#include <sys/sched.h>
#include <sys/sx.h>
#include <sys/syscallsubr.h>
#include <sys/sysctl.h>
#include <sys/sysent.h>
#include <sys/time.h>
#include <sys/umtx.h>

#include <vm/vm.h>
#include <vm/vm_param.h>
#include <vm/pmap.h>
#include <vm/vm_map.h>

static MALLOC_DEFINE(M_PLIMIT, "plimit", "plimit structures");
static MALLOC_DEFINE(M_UIDINFO, "uidinfo", "uidinfo structures");
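
/*
 * uidinfo structures are kept in a hash table indexed by uid; uihash
 * holds the table size minus one, so UIHASH() simply masks the uid
 * with it to pick a chain.
 */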
#define	UIHASH(uid)	(&uihashtbl[(uid) & uihash])
static struct rwlock uihashtbl_lock;
static LIST_HEAD(uihashhead, uidinfo) *uihashtbl;
static u_long uihash;		/* size of hash table - 1 */

static void	calcru1(struct proc *p, struct rusage_ext *ruxp,
		    struct timeval *up, struct timeval *sp);
static int	donice(struct thread *td, struct proc *chgp, int n);
static struct uidinfo *uilookup(uid_t uid);
static void	ruxagg_locked(struct rusage_ext *rux, struct thread *td);

/*
 * Resource controls and accounting.
 */
#ifndef _SYS_SYSPROTO_H_
struct getpriority_args {
	int	which;
	int	who;
};
#endif
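/*
 * Return the lowest nice value (i.e. the strongest scheduling
 * preference) among the processes selected by which/who that the
 * caller is allowed to see; ESRCH if no such process exists.  Note
 * for userland: a legitimate result can be negative, so callers of
 * getpriority(2) must clear errno beforehand to distinguish -1 as a
 * value from -1 as an error.
 */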
int
sys_getpriority(struct thread *td, struct getpriority_args *uap)
{
	struct proc *p;
	struct pgrp *pg;
	int error, low;

	error = 0;
	low = PRIO_MAX + 1;
	switch (uap->which) {
	case PRIO_PROCESS:
		if (uap->who == 0)
			low = td->td_proc->p_nice;
		else {
			p = pfind(uap->who);
			if (p == NULL)
				break;
			if (p_cansee(td, p) == 0)
				low = p->p_nice;
			PROC_UNLOCK(p);
		}
		break;

	case PRIO_PGRP:
		sx_slock(&proctree_lock);
		if (uap->who == 0) {
			pg = td->td_proc->p_pgrp;
			PGRP_LOCK(pg);
		} else {
			pg = pgfind(uap->who);
			if (pg == NULL) {
				sx_sunlock(&proctree_lock);
				break;
			}
		}
		sx_sunlock(&proctree_lock);
		LIST_FOREACH(p, &pg->pg_members, p_pglist) {
			PROC_LOCK(p);
			if (p->p_state == PRS_NORMAL &&
			    p_cansee(td, p) == 0) {
				if (p->p_nice < low)
					low = p->p_nice;
			}
			PROC_UNLOCK(p);
		}
		PGRP_UNLOCK(pg);
		break;

	case PRIO_USER:
		if (uap->who == 0)
			uap->who = td->td_ucred->cr_uid;
		sx_slock(&allproc_lock);
		FOREACH_PROC_IN_SYSTEM(p) {
			PROC_LOCK(p);
			if (p->p_state == PRS_NORMAL &&
			    p_cansee(td, p) == 0 &&
			    p->p_ucred->cr_uid == uap->who) {
				if (p->p_nice < low)
					low = p->p_nice;
			}
			PROC_UNLOCK(p);
		}
		sx_sunlock(&allproc_lock);
		break;

	default:
		error = EINVAL;
		break;
	}
	if (low == PRIO_MAX + 1 && error == 0)
		error = ESRCH;
	td->td_retval[0] = low;
	return (error);
}

#ifndef _SYS_SYSPROTO_H_
struct setpriority_args {
	int	which;
	int	who;
	int	prio;
};
#endif
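/*
 * Set the nice value for every process selected by which/who that the
 * caller may see and may reschedule.  Returns ESRCH if nothing
 * matched; otherwise the error (if any) from the last donice() call
 * wins.
 */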
int
sys_setpriority(struct thread *td, struct setpriority_args *uap)
{
	struct proc *curp, *p;
	struct pgrp *pg;
	int found = 0, error = 0;

	curp = td->td_proc;
	switch (uap->which) {
	case PRIO_PROCESS:
		if (uap->who == 0) {
			PROC_LOCK(curp);
			error = donice(td, curp, uap->prio);
			PROC_UNLOCK(curp);
		} else {
			p = pfind(uap->who);
			if (p == NULL)
				break;
			error = p_cansee(td, p);
			if (error == 0)
				error = donice(td, p, uap->prio);
			PROC_UNLOCK(p);
		}
		found++;
		break;

	case PRIO_PGRP:
		sx_slock(&proctree_lock);
		if (uap->who == 0) {
			pg = curp->p_pgrp;
			PGRP_LOCK(pg);
		} else {
			pg = pgfind(uap->who);
			if (pg == NULL) {
				sx_sunlock(&proctree_lock);
				break;
			}
		}
		sx_sunlock(&proctree_lock);
		LIST_FOREACH(p, &pg->pg_members, p_pglist) {
			PROC_LOCK(p);
			if (p->p_state == PRS_NORMAL &&
			    p_cansee(td, p) == 0) {
				error = donice(td, p, uap->prio);
				found++;
			}
			PROC_UNLOCK(p);
		}
		PGRP_UNLOCK(pg);
		break;

	case PRIO_USER:
		if (uap->who == 0)
			uap->who = td->td_ucred->cr_uid;
		sx_slock(&allproc_lock);
		FOREACH_PROC_IN_SYSTEM(p) {
			PROC_LOCK(p);
			if (p->p_state == PRS_NORMAL &&
			    p->p_ucred->cr_uid == uap->who &&
			    p_cansee(td, p) == 0) {
				error = donice(td, p, uap->prio);
				found++;
			}
			PROC_UNLOCK(p);
		}
		sx_sunlock(&allproc_lock);
		break;

	default:
		error = EINVAL;
		break;
	}
	if (found == 0 && error == 0)
		error = ESRCH;
	return (error);
}

/*
 * Set "nice" for a (whole) process.  The value is clamped to the
 * [PRIO_MIN, PRIO_MAX] range, and lowering nice (i.e. raising
 * scheduling priority) requires PRIV_SCHED_SETPRIORITY.
 */
static int
donice(struct thread *td, struct proc *p, int n)
{
	int error;

	PROC_LOCK_ASSERT(p, MA_OWNED);
	if ((error = p_cansched(td, p)))
		return (error);
	if (n > PRIO_MAX)
		n = PRIO_MAX;
	if (n < PRIO_MIN)
		n = PRIO_MIN;
	if (n < p->p_nice && priv_check(td, PRIV_SCHED_SETPRIORITY) != 0)
		return (EACCES);
	sched_nice(p, n);
	return (0);
}

static int unprivileged_idprio;
SYSCTL_INT(_security_bsd, OID_AUTO, unprivileged_idprio, CTLFLAG_RW,
    &unprivileged_idprio, 0, "Allow non-root users to set an idle priority");

/*
 * Set realtime priority for LWP.
 */
#ifndef _SYS_SYSPROTO_H_
struct rtprio_thread_args {
	int		function;
	lwpid_t		lwpid;
	struct rtprio	*rtp;
};
#endif
int
sys_rtprio_thread(struct thread *td, struct rtprio_thread_args *uap)
{
	struct proc *p;
	struct rtprio rtp;
	struct thread *td1;
	int cierror, error;

	/* Perform copyin before acquiring locks if needed. */
	if (uap->function == RTP_SET)
		cierror = copyin(uap->rtp, &rtp, sizeof(struct rtprio));
	else
		cierror = 0;

	if (uap->lwpid == 0 || uap->lwpid == td->td_tid) {
		p = td->td_proc;
		td1 = td;
		PROC_LOCK(p);
	} else {
		/* Only look the thread up in the current process. */
		td1 = tdfind(uap->lwpid, curproc->p_pid);
		if (td1 == NULL)
			return (ESRCH);
		p = td1->td_proc;
	}

	switch (uap->function) {
	case RTP_LOOKUP:
		if ((error = p_cansee(td, p)))
			break;
		pri_to_rtp(td1, &rtp);
		PROC_UNLOCK(p);
		return (copyout(&rtp, uap->rtp, sizeof(struct rtprio)));
	case RTP_SET:
		if ((error = p_cansched(td, p)) || (error = cierror))
			break;

		/* Disallow setting rtprio in most cases if not superuser. */

		/*
		 * Realtime priority has to be restricted for reasons which
		 * should be obvious.  However, for idleprio processes, there
		 * is a potential for system deadlock if an idleprio process
		 * gains a lock on a resource that other processes need (and
		 * the idleprio process can't run due to a CPU-bound normal
		 * process).  Fix me!  XXX
		 *
		 * This problem is not limited to idleprio processes.  A
		 * user-level program can obtain a file lock and hold it
		 * indefinitely.  Additionally, without idleprio processes it
		 * is still conceivable that a program with low priority will
		 * never get to run.  In short, allowing this feature might
		 * make it easier to lock a resource indefinitely, but it is
		 * not the only thing that makes it possible.
		 */
		if (RTP_PRIO_BASE(rtp.type) == RTP_PRIO_REALTIME ||
		    (RTP_PRIO_BASE(rtp.type) == RTP_PRIO_IDLE &&
		    unprivileged_idprio == 0)) {
			error = priv_check(td, PRIV_SCHED_RTPRIO);
			if (error)
				break;
		}
		error = rtp_to_pri(&rtp, td1);
		break;
	default:
		error = EINVAL;
		break;
	}
	PROC_UNLOCK(p);
	return (error);
}

/*
 * Set realtime priority.
 */
#ifndef _SYS_SYSPROTO_H_
struct rtprio_args {
	int		function;
	pid_t		pid;
	struct rtprio	*rtp;
};
#endif
int
sys_rtprio(struct thread *td, struct rtprio_args *uap)
{
	struct proc *p;
	struct thread *tdp;
	struct rtprio rtp;
	int cierror, error;

	/* Perform copyin before acquiring locks if needed. */
	if (uap->function == RTP_SET)
		cierror = copyin(uap->rtp, &rtp, sizeof(struct rtprio));
	else
		cierror = 0;

	if (uap->pid == 0) {
		p = td->td_proc;
		PROC_LOCK(p);
	} else {
		p = pfind(uap->pid);
		if (p == NULL)
			return (ESRCH);
	}

	switch (uap->function) {
	case RTP_LOOKUP:
		if ((error = p_cansee(td, p)))
			break;
		/*
		 * Return our own priority if no pid was specified; if one
		 * was, report the highest priority in the process, since
		 * there is only room to return a single priority.
		 * Note: specifying our own pid is not the same as leaving
		 * it zero.
		 */
		if (uap->pid == 0) {
			pri_to_rtp(td, &rtp);
		} else {
			struct rtprio rtp2;

			rtp.type = RTP_PRIO_IDLE;
			rtp.prio = RTP_PRIO_MAX;
			FOREACH_THREAD_IN_PROC(p, tdp) {
				pri_to_rtp(tdp, &rtp2);
				if (rtp2.type < rtp.type ||
				    (rtp2.type == rtp.type &&
				    rtp2.prio < rtp.prio)) {
					rtp.type = rtp2.type;
					rtp.prio = rtp2.prio;
				}
			}
		}
		PROC_UNLOCK(p);
		return (copyout(&rtp, uap->rtp, sizeof(struct rtprio)));
	case RTP_SET:
		if ((error = p_cansched(td, p)) || (error = cierror))
			break;

		/*
		 * Disallow setting rtprio in most cases if not superuser.
		 * See the comment in sys_rtprio_thread about idprio
		 * threads holding a lock.
		 */
		if (RTP_PRIO_BASE(rtp.type) == RTP_PRIO_REALTIME ||
		    (RTP_PRIO_BASE(rtp.type) == RTP_PRIO_IDLE &&
		    !unprivileged_idprio)) {
			error = priv_check(td, PRIV_SCHED_RTPRIO);
			if (error)
				break;
		}

		/*
		 * If we are setting our own priority, set just our
		 * thread; if we are targeting another process (which
		 * includes specifying our own pid explicitly), set all
		 * of its threads.
		 */
		if (uap->pid == 0) {
			error = rtp_to_pri(&rtp, td);
		} else {
			FOREACH_THREAD_IN_PROC(p, tdp) {
				if ((error = rtp_to_pri(&rtp, tdp)) != 0)
					break;
			}
		}
		break;
	default:
		error = EINVAL;
		break;
	}
	PROC_UNLOCK(p);
	return (error);
}

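/*
 * Validate an rtprio structure and apply it to a thread: the
 * class-relative rtp->prio is range-checked, then rebased onto the
 * kernel priority range for that class (e.g. PRI_MIN_REALTIME +
 * rtp->prio for RTP_PRIO_REALTIME).  Returns EINVAL for an unknown
 * class or an out-of-range priority.
 */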
int
rtp_to_pri(struct rtprio *rtp, struct thread *td)
{
	u_char	newpri;
	u_char	oldpri;

	switch (RTP_PRIO_BASE(rtp->type)) {
	case RTP_PRIO_REALTIME:
		if (rtp->prio > RTP_PRIO_MAX)
			return (EINVAL);
		newpri = PRI_MIN_REALTIME + rtp->prio;
		break;
	case RTP_PRIO_NORMAL:
		if (rtp->prio > (PRI_MAX_TIMESHARE - PRI_MIN_TIMESHARE))
			return (EINVAL);
		newpri = PRI_MIN_TIMESHARE + rtp->prio;
		break;
	case RTP_PRIO_IDLE:
		if (rtp->prio > RTP_PRIO_MAX)
			return (EINVAL);
		newpri = PRI_MIN_IDLE + rtp->prio;
		break;
	default:
		return (EINVAL);
	}

	thread_lock(td);
	sched_class(td, rtp->type);	/* XXX fix */
	oldpri = td->td_user_pri;
	sched_user_prio(td, newpri);
	if (td->td_user_pri != oldpri && (td == curthread ||
	    td->td_priority == oldpri || td->td_user_pri <= PRI_MAX_REALTIME))
		sched_prio(td, td->td_user_pri);
	if (TD_ON_UPILOCK(td) && oldpri != newpri) {
		critical_enter();
		thread_unlock(td);
		umtx_pi_adjust(td, oldpri);
		critical_exit();
	} else
		thread_unlock(td);
	return (0);
}

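/*
 * The inverse of rtp_to_pri(): report a thread's scheduling class and
 * its base user priority rebased to be relative to that class.
 */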
void
pri_to_rtp(struct thread *td, struct rtprio *rtp)
{

	thread_lock(td);
	switch (PRI_BASE(td->td_pri_class)) {
	case PRI_REALTIME:
		rtp->prio = td->td_base_user_pri - PRI_MIN_REALTIME;
		break;
	case PRI_TIMESHARE:
		rtp->prio = td->td_base_user_pri - PRI_MIN_TIMESHARE;
		break;
	case PRI_IDLE:
		rtp->prio = td->td_base_user_pri - PRI_MIN_IDLE;
		break;
	default:
		break;
	}
	rtp->type = td->td_pri_class;
	thread_unlock(td);
}

#if defined(COMPAT_43)
#ifndef _SYS_SYSPROTO_H_
struct osetrlimit_args {
	u_int	which;
	struct	orlimit *rlp;
};
#endif
int
osetrlimit(struct thread *td, struct osetrlimit_args *uap)
{
	struct orlimit olim;
	struct rlimit lim;
	int error;

	if ((error = copyin(uap->rlp, &olim, sizeof(struct orlimit))))
		return (error);
	lim.rlim_cur = olim.rlim_cur;
	lim.rlim_max = olim.rlim_max;
	error = kern_setrlimit(td, uap->which, &lim);
	return (error);
}

#ifndef _SYS_SYSPROTO_H_
struct ogetrlimit_args {
	u_int	which;
	struct	orlimit *rlp;
};
#endif
int
ogetrlimit(struct thread *td, struct ogetrlimit_args *uap)
{
	struct orlimit olim;
	struct rlimit rl;
	struct proc *p;
	int error;

	if (uap->which >= RLIM_NLIMITS)
		return (EINVAL);
	p = td->td_proc;
	PROC_LOCK(p);
	lim_rlimit(p, uap->which, &rl);
	PROC_UNLOCK(p);

	/*
	 * XXX it would be more correct to convert only RLIM_INFINITY to the
	 * old RLIM_INFINITY and fail with EOVERFLOW for other larger
	 * values.  Most 64->32 and 32->16 conversions, including not
	 * unimportant ones of uids, are even more broken than what we
	 * do here (they blindly truncate).  We don't do this correctly
	 * here since we have little experience with EOVERFLOW yet.
	 * Elsewhere, getuid() can't fail...
	 */
	olim.rlim_cur = rl.rlim_cur > 0x7fffffff ? 0x7fffffff : rl.rlim_cur;
	olim.rlim_max = rl.rlim_max > 0x7fffffff ? 0x7fffffff : rl.rlim_max;
	error = copyout(&olim, uap->rlp, sizeof(olim));
	return (error);
}
#endif /* COMPAT_43 */

#ifndef _SYS_SYSPROTO_H_
struct __setrlimit_args {
	u_int	which;
	struct	rlimit *rlp;
};
#endif
int
sys_setrlimit(struct thread *td, struct __setrlimit_args *uap)
{
	struct rlimit alim;
	int error;

	if ((error = copyin(uap->rlp, &alim, sizeof(struct rlimit))))
		return (error);
	error = kern_setrlimit(td, uap->which, &alim);
	return (error);
}

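/*
 * Callout handler enforcing RLIMIT_CPU: armed from lim_fork() and
 * kern_proc_setrlimit() and re-armed here every second (hz ticks)
 * until the process exits or its CPU limit becomes infinite.
 */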
static void
lim_cb(void *arg)
{
	struct rlimit rlim;
	struct thread *td;
	struct proc *p;

	p = arg;
	PROC_LOCK_ASSERT(p, MA_OWNED);
	/*
	 * Check if the process exceeds its cpu resource allocation.  If
	 * it reaches the max, arrange to kill the process in ast().
	 */
	if (p->p_cpulimit == RLIM_INFINITY)
		return;
	PROC_SLOCK(p);
	FOREACH_THREAD_IN_PROC(p, td) {
		ruxagg(p, td);
	}
	PROC_SUNLOCK(p);
	if (p->p_rux.rux_runtime > p->p_cpulimit * cpu_tickrate()) {
		lim_rlimit(p, RLIMIT_CPU, &rlim);
		if (p->p_rux.rux_runtime >= rlim.rlim_max * cpu_tickrate()) {
			killproc(p, "exceeded maximum CPU limit");
		} else {
			if (p->p_cpulimit < rlim.rlim_max)
				p->p_cpulimit += 5;
			kern_psignal(p, SIGXCPU);
		}
	}
	if ((p->p_flag & P_WEXIT) == 0)
		callout_reset(&p->p_limco, hz, lim_cb, p);
}

int
kern_setrlimit(struct thread *td, u_int which, struct rlimit *limp)
{

	return (kern_proc_setrlimit(td, td->td_proc, which, limp));
}

int
kern_proc_setrlimit(struct thread *td, struct proc *p, u_int which,
    struct rlimit *limp)
{
	struct plimit *newlim, *oldlim;
	struct rlimit *alimp;
	struct rlimit oldssiz;
	int error;

	if (which >= RLIM_NLIMITS)
		return (EINVAL);

	/*
	 * Preserve historical bugs by treating negative limits as unsigned.
	 */
	if (limp->rlim_cur < 0)
		limp->rlim_cur = RLIM_INFINITY;
	if (limp->rlim_max < 0)
		limp->rlim_max = RLIM_INFINITY;

	oldssiz.rlim_cur = 0;
	newlim = lim_alloc();
	PROC_LOCK(p);
	oldlim = p->p_limit;
	alimp = &oldlim->pl_rlimit[which];
	if (limp->rlim_cur > alimp->rlim_max ||
	    limp->rlim_max > alimp->rlim_max)
		if ((error = priv_check(td, PRIV_PROC_SETRLIMIT))) {
			PROC_UNLOCK(p);
			lim_free(newlim);
			return (error);
		}
	if (limp->rlim_cur > limp->rlim_max)
		limp->rlim_cur = limp->rlim_max;
	lim_copy(newlim, oldlim);
	alimp = &newlim->pl_rlimit[which];

	switch (which) {
	case RLIMIT_CPU:
		if (limp->rlim_cur != RLIM_INFINITY &&
		    p->p_cpulimit == RLIM_INFINITY)
			callout_reset(&p->p_limco, hz, lim_cb, p);
		p->p_cpulimit = limp->rlim_cur;
		break;

	case RLIMIT_DATA:
		if (limp->rlim_cur > maxdsiz)
			limp->rlim_cur = maxdsiz;
		if (limp->rlim_max > maxdsiz)
			limp->rlim_max = maxdsiz;
		break;

	case RLIMIT_STACK:
		if (limp->rlim_cur > maxssiz)
			limp->rlim_cur = maxssiz;
		if (limp->rlim_max > maxssiz)
			limp->rlim_max = maxssiz;
		oldssiz = *alimp;
		if (p->p_sysent->sv_fixlimit != NULL)
			p->p_sysent->sv_fixlimit(&oldssiz,
			    RLIMIT_STACK);
		break;

	case RLIMIT_NOFILE:
		if (limp->rlim_cur > maxfilesperproc)
			limp->rlim_cur = maxfilesperproc;
		if (limp->rlim_max > maxfilesperproc)
			limp->rlim_max = maxfilesperproc;
		break;

	case RLIMIT_NPROC:
		if (limp->rlim_cur > maxprocperuid)
			limp->rlim_cur = maxprocperuid;
		if (limp->rlim_max > maxprocperuid)
			limp->rlim_max = maxprocperuid;
		if (limp->rlim_cur < 1)
			limp->rlim_cur = 1;
		if (limp->rlim_max < 1)
			limp->rlim_max = 1;
		break;
	}
	if (p->p_sysent->sv_fixlimit != NULL)
		p->p_sysent->sv_fixlimit(limp, which);
	*alimp = *limp;
	p->p_limit = newlim;
	PROC_UNLOCK(p);
	lim_free(oldlim);

	if (which == RLIMIT_STACK) {
		/*
		 * Stack is allocated to the max at exec time with only
		 * "rlim_cur" bytes accessible.  If the stack limit is going
		 * up, make more accessible; if going down, make inaccessible.
		 */
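		/*
		 * Illustrative numbers: shrinking rlim_cur from 64 MB to
		 * 32 MB re-protects, with VM_PROT_NONE, the 32 MB of the
		 * old stack range that now lies below the new limit;
		 * growing back to 64 MB would make that same range
		 * accessible again with sv_stackprot.
		 */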
		if (limp->rlim_cur != oldssiz.rlim_cur) {
			vm_offset_t addr;
			vm_size_t size;
			vm_prot_t prot;

			if (limp->rlim_cur > oldssiz.rlim_cur) {
				prot = p->p_sysent->sv_stackprot;
				size = limp->rlim_cur - oldssiz.rlim_cur;
				addr = p->p_sysent->sv_usrstack -
				    limp->rlim_cur;
			} else {
				prot = VM_PROT_NONE;
				size = oldssiz.rlim_cur - limp->rlim_cur;
				addr = p->p_sysent->sv_usrstack -
				    oldssiz.rlim_cur;
			}
			addr = trunc_page(addr);
			size = round_page(size);
			(void)vm_map_protect(&p->p_vmspace->vm_map,
			    addr, addr + size, prot, FALSE);
		}
	}

	return (0);
}

#ifndef _SYS_SYSPROTO_H_
struct __getrlimit_args {
	u_int	which;
	struct	rlimit *rlp;
};
#endif
/* ARGSUSED */
int
sys_getrlimit(struct thread *td, struct __getrlimit_args *uap)
{
	struct rlimit rlim;
	struct proc *p;
	int error;

	if (uap->which >= RLIM_NLIMITS)
		return (EINVAL);
	p = td->td_proc;
	PROC_LOCK(p);
	lim_rlimit(p, uap->which, &rlim);
	PROC_UNLOCK(p);
	error = copyout(&rlim, uap->rlp, sizeof(struct rlimit));
	return (error);
}

/*
 * Transform the running time and tick information for children of proc p
 * into user and system time usage.
 */
void
calccru(struct proc *p, struct timeval *up, struct timeval *sp)
{

	PROC_LOCK_ASSERT(p, MA_OWNED);
	calcru1(p, &p->p_crux, up, sp);
}

/*
 * Transform the running time and tick information in proc p into user
 * and system time usage.  If appropriate, include the current time slice
 * on this CPU.
 */
void
calcru(struct proc *p, struct timeval *up, struct timeval *sp)
{
	struct thread *td;
	uint64_t runtime, u;

	PROC_LOCK_ASSERT(p, MA_OWNED);
	PROC_SLOCK_ASSERT(p, MA_OWNED);
	/*
	 * If we are getting stats for the current process, then add in the
	 * stats that this thread has accumulated in its current time slice.
	 * We reset the thread and CPU state as if we had performed a context
	 * switch right here.
	 */
	td = curthread;
	if (td->td_proc == p) {
		u = cpu_ticks();
		runtime = u - PCPU_GET(switchtime);
		td->td_runtime += runtime;
		td->td_incruntime += runtime;
		PCPU_SET(switchtime, u);
	}
	/* Make sure the per-thread stats are current. */
	FOREACH_THREAD_IN_PROC(p, td) {
		if (td->td_incruntime == 0)
			continue;
		ruxagg(p, td);
	}
	calcru1(p, &p->p_rux, up, sp);
}

/* Collect resource usage for a single thread. */
void
rufetchtd(struct thread *td, struct rusage *ru)
{
	struct proc *p;
	uint64_t runtime, u;

	p = td->td_proc;
	PROC_SLOCK_ASSERT(p, MA_OWNED);
	THREAD_LOCK_ASSERT(td, MA_OWNED);
	/*
	 * If we are getting stats for the current thread, then add in the
	 * stats that this thread has accumulated in its current time slice.
	 * We reset the thread and CPU state as if we had performed a context
	 * switch right here.
	 */
	if (td == curthread) {
		u = cpu_ticks();
		runtime = u - PCPU_GET(switchtime);
		td->td_runtime += runtime;
		td->td_incruntime += runtime;
		PCPU_SET(switchtime, u);
	}
	ruxagg(p, td);
	*ru = td->td_ru;
	calcru1(p, &td->td_rux, &ru->ru_utime, &ru->ru_stime);
}

static void
calcru1(struct proc *p, struct rusage_ext *ruxp, struct timeval *up,
    struct timeval *sp)
{
	/* {user, system, interrupt, total} {ticks, usec}: */
	uint64_t ut, uu, st, su, it, tt, tu;

	ut = ruxp->rux_uticks;
	st = ruxp->rux_sticks;
	it = ruxp->rux_iticks;
	tt = ut + st + it;
	if (tt == 0) {
		/* Avoid divide by zero. */
		st = 1;
		tt = 1;
	}
	tu = cputick2usec(ruxp->rux_runtime);
	if ((int64_t)tu < 0) {
		/* XXX: this should be an assert /phk */
		printf("calcru: negative runtime of %jd usec for pid %d (%s)\n",
		    (intmax_t)tu, p->p_pid, p->p_comm);
		tu = ruxp->rux_tu;
	}

	if (tu >= ruxp->rux_tu) {
		/*
		 * The normal case, time increased.
		 * Enforce monotonicity of bucketed numbers.
		 */
		uu = (tu * ut) / tt;
		if (uu < ruxp->rux_uu)
			uu = ruxp->rux_uu;
		su = (tu * st) / tt;
		if (su < ruxp->rux_su)
			su = ruxp->rux_su;
	} else if (tu + 3 > ruxp->rux_tu || 101 * tu > 100 * ruxp->rux_tu) {
		/*
		 * When we calibrate the cputicker, it is not uncommon to
		 * see the presumably fixed frequency increase slightly over
		 * time as a result of thermal stabilization and NTP
		 * discipline (of the reference clock).  We therefore ignore
		 * a bit of backwards slop because we expect to catch up
		 * shortly.  We use a 3 microsecond limit to catch low
		 * counts and a 1% limit for high counts.
		 */
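		/*
		 * Illustrative numbers: with rux_tu at 1000000 usec, a
		 * recomputed tu of 999998 usec passes the 3 usec test
		 * (999998 + 3 > 1000000) and one of 999500 usec passes
		 * the 1% test (101 * 999500 > 100 * 1000000); both keep
		 * the previous values instead of running time backwards.
		 */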
		uu = ruxp->rux_uu;
		su = ruxp->rux_su;
		tu = ruxp->rux_tu;
	} else { /* tu < ruxp->rux_tu */
		/*
		 * What happened here was likely that a laptop, which ran at
		 * a reduced clock frequency at boot, kicked into high gear.
		 * The wisdom of spamming this message in that case is
		 * dubious, but it might also be indicative of something
		 * serious, so let's keep it and hope laptops can be made
		 * more truthful about their CPU speed via ACPI.
		 */
		printf("calcru: runtime went backwards from %ju usec "
		    "to %ju usec for pid %d (%s)\n",
		    (uintmax_t)ruxp->rux_tu, (uintmax_t)tu,
		    p->p_pid, p->p_comm);
		uu = (tu * ut) / tt;
		su = (tu * st) / tt;
	}

	ruxp->rux_uu = uu;
	ruxp->rux_su = su;
	ruxp->rux_tu = tu;

	up->tv_sec = uu / 1000000;
	up->tv_usec = uu % 1000000;
	sp->tv_sec = su / 1000000;
	sp->tv_usec = su % 1000000;
}

#ifndef _SYS_SYSPROTO_H_
struct getrusage_args {
	int	who;
	struct	rusage *rusage;
};
#endif
int
sys_getrusage(struct thread *td, struct getrusage_args *uap)
{
	struct rusage ru;
	int error;

	error = kern_getrusage(td, uap->who, &ru);
	if (error == 0)
		error = copyout(&ru, uap->rusage, sizeof(struct rusage));
	return (error);
}

int
kern_getrusage(struct thread *td, int who, struct rusage *rup)
{
	struct proc *p;
	int error;

	error = 0;
	p = td->td_proc;
	PROC_LOCK(p);
	switch (who) {
	case RUSAGE_SELF:
		rufetchcalc(p, rup, &rup->ru_utime, &rup->ru_stime);
		break;

	case RUSAGE_CHILDREN:
		*rup = p->p_stats->p_cru;
		calccru(p, &rup->ru_utime, &rup->ru_stime);
		break;

	case RUSAGE_THREAD:
		PROC_SLOCK(p);
		thread_lock(td);
		rufetchtd(td, rup);
		thread_unlock(td);
		PROC_SUNLOCK(p);
		break;

	default:
		error = EINVAL;
	}
	PROC_UNLOCK(p);
	return (error);
}

void
rucollect(struct rusage *ru, struct rusage *ru2)
{
	long *ip, *ip2;
	int i;

	if (ru->ru_maxrss < ru2->ru_maxrss)
		ru->ru_maxrss = ru2->ru_maxrss;
	ip = &ru->ru_first;
	ip2 = &ru2->ru_first;
	for (i = &ru->ru_last - &ru->ru_first; i >= 0; i--)
		*ip++ += *ip2++;
}

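/*
 * Add both the rusage and rusage_ext accumulators of (ru2, rux2) into
 * (ru, rux), e.g. when child statistics are folded into the parent's
 * totals.
 */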
void
ruadd(struct rusage *ru, struct rusage_ext *rux, struct rusage *ru2,
    struct rusage_ext *rux2)
{

	rux->rux_runtime += rux2->rux_runtime;
	rux->rux_uticks += rux2->rux_uticks;
	rux->rux_sticks += rux2->rux_sticks;
	rux->rux_iticks += rux2->rux_iticks;
	rux->rux_uu += rux2->rux_uu;
	rux->rux_su += rux2->rux_su;
	rux->rux_tu += rux2->rux_tu;
	rucollect(ru, ru2);
}

/*
 * Aggregate tick counts into the proc's rusage_ext.
 */
static void
ruxagg_locked(struct rusage_ext *rux, struct thread *td)
{

	THREAD_LOCK_ASSERT(td, MA_OWNED);
	PROC_SLOCK_ASSERT(td->td_proc, MA_OWNED);
	rux->rux_runtime += td->td_incruntime;
	rux->rux_uticks += td->td_uticks;
	rux->rux_sticks += td->td_sticks;
	rux->rux_iticks += td->td_iticks;
}

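/*
 * Charge a thread's outstanding run time and tick counts to both the
 * process-wide and per-thread rusage_ext totals, then zero the
 * per-thread counters so nothing is counted twice.
 */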
void
ruxagg(struct proc *p, struct thread *td)
{

	thread_lock(td);
	ruxagg_locked(&p->p_rux, td);
	ruxagg_locked(&td->td_rux, td);
	td->td_incruntime = 0;
	td->td_uticks = 0;
	td->td_iticks = 0;
	td->td_sticks = 0;
	thread_unlock(td);
}

/*
 * Update the rusage_ext structure and fetch a valid aggregate rusage
 * for proc p if storage for one is supplied.
 */
void
rufetch(struct proc *p, struct rusage *ru)
{
	struct thread *td;

	PROC_SLOCK_ASSERT(p, MA_OWNED);

	*ru = p->p_ru;
	if (p->p_numthreads > 0) {
		FOREACH_THREAD_IN_PROC(p, td) {
			ruxagg(p, td);
			rucollect(ru, &td->td_ru);
		}
	}
}

/*
 * Atomically perform a rufetch and a calcru together.
 * Consumers can safely assume that calcru is executed only once
 * the rufetch is completed.
 */
void
rufetchcalc(struct proc *p, struct rusage *ru, struct timeval *up,
    struct timeval *sp)
{

	PROC_SLOCK(p);
	rufetch(p, ru);
	calcru(p, up, sp);
	PROC_SUNLOCK(p);
}

/*
 * Allocate a new resource limits structure and initialize its
 * reference count.
 */
struct plimit *
lim_alloc(void)
{
	struct plimit *limp;

	limp = malloc(sizeof(struct plimit), M_PLIMIT, M_WAITOK);
	refcount_init(&limp->pl_refcnt, 1);
	return (limp);
}

struct plimit *
lim_hold(struct plimit *limp)
{

	refcount_acquire(&limp->pl_refcnt);
	return (limp);
}

void
lim_fork(struct proc *p1, struct proc *p2)
{

	p2->p_limit = lim_hold(p1->p_limit);
	callout_init_mtx(&p2->p_limco, &p2->p_mtx, 0);
	if (p1->p_cpulimit != RLIM_INFINITY)
		callout_reset(&p2->p_limco, hz, lim_cb, p2);
}

void
lim_free(struct plimit *limp)
{

	KASSERT(limp->pl_refcnt > 0, ("plimit refcnt underflow"));
	if (refcount_release(&limp->pl_refcnt))
		free((void *)limp, M_PLIMIT);
}

/*
 * Make a copy of the plimit structure.
 * We share these structures copy-on-write after fork.
 */
void
lim_copy(struct plimit *dst, struct plimit *src)
{

	KASSERT(dst->pl_refcnt == 1, ("lim_copy to shared limit"));
	bcopy(src->pl_rlimit, dst->pl_rlimit, sizeof(src->pl_rlimit));
}

/*
 * Return the hard limit for a particular system resource.  The
 * which parameter specifies the index into the rlimit array.
 */
rlim_t
lim_max(struct proc *p, int which)
{
	struct rlimit rl;

	lim_rlimit(p, which, &rl);
	return (rl.rlim_max);
}

/*
 * Return the current (soft) limit for a particular system resource.
 * The which parameter specifies the index into the rlimit array.
 */
rlim_t
lim_cur(struct proc *p, int which)
{
	struct rlimit rl;

	lim_rlimit(p, which, &rl);
	return (rl.rlim_cur);
}

/*
 * Return a copy of the entire rlimit structure for the system limit
 * specified by 'which' in the rlimit structure pointed to by 'rlp'.
 */
void
lim_rlimit(struct proc *p, int which, struct rlimit *rlp)
{

	PROC_LOCK_ASSERT(p, MA_OWNED);
	KASSERT(which >= 0 && which < RLIM_NLIMITS,
	    ("request for invalid resource limit"));
	*rlp = p->p_limit->pl_rlimit[which];
	if (p->p_sysent->sv_fixlimit != NULL)
		p->p_sysent->sv_fixlimit(rlp, which);
}

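/*
 * Initialize the uidinfo hash table and its lock; the table size is
 * derived from maxproc.
 */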
void
uihashinit(void)
{

	uihashtbl = hashinit(maxproc / 16, M_UIDINFO, &uihash);
	rw_init(&uihashtbl_lock, "uidinfo hash");
}

/*
 * Look up a uidinfo struct for the parameter uid.
 * uihashtbl_lock must be locked.
 */
static struct uidinfo *
uilookup(uid_t uid)
{
	struct uihashhead *uipp;
	struct uidinfo *uip;

	rw_assert(&uihashtbl_lock, RA_LOCKED);
	uipp = UIHASH(uid);
	LIST_FOREACH(uip, uipp, ui_hash)
		if (uip->ui_uid == uid)
			break;

	return (uip);
}

/*
 * Find or allocate a struct uidinfo for a particular uid.
 * Increase refcount on uidinfo struct returned.
 * uifree() should be called on a struct uidinfo when released.
 */
struct uidinfo *
uifind(uid_t uid)
{
	struct uidinfo *old_uip, *uip;

	rw_rlock(&uihashtbl_lock);
	uip = uilookup(uid);
	if (uip == NULL) {
		rw_runlock(&uihashtbl_lock);
		uip = malloc(sizeof(*uip), M_UIDINFO, M_WAITOK | M_ZERO);
		racct_create(&uip->ui_racct);
		rw_wlock(&uihashtbl_lock);
		/*
		 * There's a chance someone created our uidinfo while we
		 * were in malloc and not holding the lock, so we have to
		 * make sure we don't insert a duplicate uidinfo.
		 */
		if ((old_uip = uilookup(uid)) != NULL) {
			/* Someone else beat us to it. */
			racct_destroy(&uip->ui_racct);
			free(uip, M_UIDINFO);
			uip = old_uip;
		} else {
			refcount_init(&uip->ui_ref, 0);
			uip->ui_uid = uid;
			mtx_init(&uip->ui_vmsize_mtx, "ui_vmsize", NULL,
			    MTX_DEF);
			LIST_INSERT_HEAD(UIHASH(uid), uip, ui_hash);
		}
	}
	uihold(uip);
	rw_unlock(&uihashtbl_lock);
	return (uip);
}

/*
 * Place another refcount on a uidinfo struct.
 */
void
uihold(struct uidinfo *uip)
{

	refcount_acquire(&uip->ui_ref);
}

/*-
 * Since uidinfo structs have a long lifetime, we use an
 * opportunistic refcounting scheme to avoid locking the lookup hash
 * for each release.
 *
 * If the refcount hits 0, we need to free the structure,
 * which means we need to lock the hash.
 * Optimal case:
 *   If we can drop the refcount without it reaching zero, do it
 *   with a single atomic op and return without touching the hash.
 * Suboptimal case:
 *   The count may reach zero, so take the hash lock exclusively,
 *   release the reference and, if it did hit zero, tear the
 *   structure down.
 */
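/*
 * Illustrative interleaving: a releaser reads ui_ref == 1 and heads
 * for the hash lock; before it gets there, uifind() (run under the
 * hash lock) gains a new reference.  The releaser's later
 * refcount_release() then fails to hit zero and it simply drops the
 * hash lock.
 */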
void
uifree(struct uidinfo *uip)
{
	int old;

	/* Prepare for optimal case. */
	old = uip->ui_ref;
	if (old > 1 && atomic_cmpset_int(&uip->ui_ref, old, old - 1))
		return;

	/* Prepare for suboptimal case. */
	rw_wlock(&uihashtbl_lock);
	if (refcount_release(&uip->ui_ref)) {
		racct_destroy(&uip->ui_racct);
		LIST_REMOVE(uip, ui_hash);
		rw_wunlock(&uihashtbl_lock);
		if (uip->ui_sbsize != 0)
			printf("freeing uidinfo: uid = %d, sbsize = %ld\n",
			    uip->ui_uid, uip->ui_sbsize);
		if (uip->ui_proccnt != 0)
			printf("freeing uidinfo: uid = %d, proccnt = %ld\n",
			    uip->ui_uid, uip->ui_proccnt);
		if (uip->ui_vmsize != 0)
			printf("freeing uidinfo: uid = %d, swapuse = %lld\n",
			    uip->ui_uid, (unsigned long long)uip->ui_vmsize);
		mtx_destroy(&uip->ui_vmsize_mtx);
		free(uip, M_UIDINFO);
		return;
	}
	/*
	 * Someone added a reference between atomic_cmpset_int() and
	 * rw_wlock(&uihashtbl_lock).
	 */
	rw_wunlock(&uihashtbl_lock);
}

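/*
 * Invoke the callback on every uidinfo's racct, holding the hash lock
 * shared so entries cannot be removed mid-walk.
 */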
void
ui_racct_foreach(void (*callback)(struct racct *racct,
    void *arg2, void *arg3), void *arg2, void *arg3)
{
	struct uidinfo *uip;
	struct uihashhead *uih;

	rw_rlock(&uihashtbl_lock);
	for (uih = &uihashtbl[uihash]; uih >= uihashtbl; uih--) {
		LIST_FOREACH(uip, uih, ui_hash) {
			(callback)(uip->ui_racct, arg2, arg3);
		}
	}
	rw_runlock(&uihashtbl_lock);
}

/*
 * Change the count of processes associated with a given user.
 * When 'max' is 0, don't enforce a limit.
 */
int
chgproccnt(struct uidinfo *uip, int diff, rlim_t max)
{

	/* Don't allow them to exceed max, but allow subtraction. */
	if (diff > 0 && max != 0) {
		if (atomic_fetchadd_long(&uip->ui_proccnt, (long)diff) +
		    diff > max) {
			atomic_subtract_long(&uip->ui_proccnt, (long)diff);
			return (0);
		}
	} else {
		atomic_add_long(&uip->ui_proccnt, (long)diff);
		if (uip->ui_proccnt < 0)
			printf("negative proccnt for uid = %d\n", uip->ui_uid);
	}
	return (1);
}

/*
 * Change the total socket buffer size a user has used.
 */
int
chgsbsize(struct uidinfo *uip, u_int *hiwat, u_int to, rlim_t max)
{
	int diff;

	diff = to - *hiwat;
	if (diff > 0) {
		if (atomic_fetchadd_long(&uip->ui_sbsize, (long)diff) +
		    diff > max) {
			atomic_subtract_long(&uip->ui_sbsize, (long)diff);
			return (0);
		}
	} else {
		atomic_add_long(&uip->ui_sbsize, (long)diff);
		if (uip->ui_sbsize < 0)
			printf("negative sbsize for uid = %d\n", uip->ui_uid);
	}
	*hiwat = to;
	return (1);
}

/*
 * Change the count of pseudo-terminals associated with a given user.
 * When 'max' is 0, don't enforce a limit.
 */
int
chgptscnt(struct uidinfo *uip, int diff, rlim_t max)
{

	/* Don't allow them to exceed max, but allow subtraction. */
	if (diff > 0 && max != 0) {
		if (atomic_fetchadd_long(&uip->ui_ptscnt, (long)diff) +
		    diff > max) {
			atomic_subtract_long(&uip->ui_ptscnt, (long)diff);
			return (0);
		}
	} else {
		atomic_add_long(&uip->ui_ptscnt, (long)diff);
		if (uip->ui_ptscnt < 0)
			printf("negative ptscnt for uid = %d\n", uip->ui_uid);
	}
	return (1);
}