/*-
 * Copyright (c) 1982, 1986, 1991, 1993
 *	The Regents of the University of California.  All rights reserved.
 * (c) UNIX System Laboratories, Inc.
 * All or some portions of this file are derived from material licensed
 * to the University of California by American Telephone and Telegraph
 * Co. or Unix System Laboratories, Inc. and are reproduced herein with
 * the permission of UNIX System Laboratories, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)kern_resource.c	8.5 (Berkeley) 1/21/94
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD: head/sys/kern/kern_resource.c 228207 2011-12-02 19:59:46Z jhb $");

#include "opt_compat.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/sysproto.h>
#include <sys/file.h>
#include <sys/kernel.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/mutex.h>
#include <sys/priv.h>
#include <sys/proc.h>
#include <sys/refcount.h>
#include <sys/racct.h>
#include <sys/resourcevar.h>
#include <sys/rwlock.h>
#include <sys/sched.h>
#include <sys/sx.h>
#include <sys/syscallsubr.h>
#include <sys/sysent.h>
#include <sys/time.h>
#include <sys/umtx.h>

#include <vm/vm.h>
#include <vm/vm_param.h>
#include <vm/pmap.h>
#include <vm/vm_map.h>


static MALLOC_DEFINE(M_PLIMIT, "plimit", "plimit structures");
static MALLOC_DEFINE(M_UIDINFO, "uidinfo", "uidinfo structures");
#define	UIHASH(uid)	(&uihashtbl[(uid) & uihash])
static struct rwlock uihashtbl_lock;
static LIST_HEAD(uihashhead, uidinfo) *uihashtbl;
static u_long uihash;		/* size of hash table - 1 */

static void	calcru1(struct proc *p, struct rusage_ext *ruxp,
		    struct timeval *up, struct timeval *sp);
static int	donice(struct thread *td, struct proc *chgp, int n);
static struct uidinfo *uilookup(uid_t uid);
static void	ruxagg_locked(struct rusage_ext *rux, struct thread *td);

/*
 * Resource controls and accounting.
 */
#ifndef _SYS_SYSPROTO_H_
struct getpriority_args {
	int	which;
	int	who;
};
#endif
int
sys_getpriority(td, uap)
	struct thread *td;
	register struct getpriority_args *uap;
{
	struct proc *p;
	struct pgrp *pg;
	int error, low;

	error = 0;
	low = PRIO_MAX + 1;
	switch (uap->which) {

	case PRIO_PROCESS:
		if (uap->who == 0)
			low = td->td_proc->p_nice;
		else {
			p = pfind(uap->who);
			if (p == NULL)
				break;
			if (p_cansee(td, p) == 0)
				low = p->p_nice;
			PROC_UNLOCK(p);
		}
		break;

	case PRIO_PGRP:
		sx_slock(&proctree_lock);
		if (uap->who == 0) {
			pg = td->td_proc->p_pgrp;
			PGRP_LOCK(pg);
		} else {
			pg = pgfind(uap->who);
			if (pg == NULL) {
				sx_sunlock(&proctree_lock);
				break;
			}
		}
		sx_sunlock(&proctree_lock);
		LIST_FOREACH(p, &pg->pg_members, p_pglist) {
			PROC_LOCK(p);
			if (p->p_state == PRS_NORMAL &&
			    p_cansee(td, p) == 0) {
				if (p->p_nice < low)
					low = p->p_nice;
			}
			PROC_UNLOCK(p);
		}
		PGRP_UNLOCK(pg);
		break;

	case PRIO_USER:
		if (uap->who == 0)
			uap->who = td->td_ucred->cr_uid;
		sx_slock(&allproc_lock);
		FOREACH_PROC_IN_SYSTEM(p) {
			PROC_LOCK(p);
			if (p->p_state == PRS_NORMAL &&
			    p_cansee(td, p) == 0 &&
			    p->p_ucred->cr_uid == uap->who) {
				if (p->p_nice < low)
					low = p->p_nice;
			}
			PROC_UNLOCK(p);
		}
		sx_sunlock(&allproc_lock);
		break;

	default:
		error = EINVAL;
		break;
	}
	if (low == PRIO_MAX + 1 && error == 0)
		error = ESRCH;
	td->td_retval[0] = low;
	return (error);
}
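
/*
 * Usage sketch (illustrative only): from userland, getpriority(2) returns
 * the nice value directly, and since -1 is itself a valid nice value the
 * caller typically clears errno beforehand to tell it apart from an error:
 *
 *	#include <sys/resource.h>
 *	#include <err.h>
 *	#include <errno.h>
 *
 *	errno = 0;
 *	int low = getpriority(PRIO_PGRP, 0);
 *	if (low == -1 && errno != 0)
 *		err(1, "getpriority");
 */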

#ifndef _SYS_SYSPROTO_H_
struct setpriority_args {
	int	which;
	int	who;
	int	prio;
};
#endif
int
sys_setpriority(td, uap)
	struct thread *td;
	struct setpriority_args *uap;
{
	struct proc *curp, *p;
	struct pgrp *pg;
	int found = 0, error = 0;

	curp = td->td_proc;
	switch (uap->which) {
	case PRIO_PROCESS:
		if (uap->who == 0) {
			PROC_LOCK(curp);
			error = donice(td, curp, uap->prio);
			PROC_UNLOCK(curp);
		} else {
			p = pfind(uap->who);
			if (p == NULL)
				break;
			error = p_cansee(td, p);
			if (error == 0)
				error = donice(td, p, uap->prio);
			PROC_UNLOCK(p);
		}
		found++;
		break;

	case PRIO_PGRP:
		sx_slock(&proctree_lock);
		if (uap->who == 0) {
			pg = curp->p_pgrp;
			PGRP_LOCK(pg);
		} else {
			pg = pgfind(uap->who);
			if (pg == NULL) {
				sx_sunlock(&proctree_lock);
				break;
			}
		}
		sx_sunlock(&proctree_lock);
		LIST_FOREACH(p, &pg->pg_members, p_pglist) {
			PROC_LOCK(p);
			if (p->p_state == PRS_NORMAL &&
			    p_cansee(td, p) == 0) {
				error = donice(td, p, uap->prio);
				found++;
			}
			PROC_UNLOCK(p);
		}
		PGRP_UNLOCK(pg);
		break;

	case PRIO_USER:
		if (uap->who == 0)
			uap->who = td->td_ucred->cr_uid;
		sx_slock(&allproc_lock);
		FOREACH_PROC_IN_SYSTEM(p) {
			PROC_LOCK(p);
			if (p->p_state == PRS_NORMAL &&
			    p->p_ucred->cr_uid == uap->who &&
			    p_cansee(td, p) == 0) {
				error = donice(td, p, uap->prio);
				found++;
			}
			PROC_UNLOCK(p);
		}
		sx_sunlock(&allproc_lock);
		break;

	default:
		error = EINVAL;
		break;
	}
	if (found == 0 && error == 0)
		error = ESRCH;
	return (error);
}
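
/*
 * Usage sketch (illustrative only): renice the calling process to a nice
 * value of 10.  Lowering the value again would require
 * PRIV_SCHED_SETPRIORITY, as enforced in donice() below.
 *
 *	#include <sys/resource.h>
 *	#include <err.h>
 *
 *	if (setpriority(PRIO_PROCESS, 0, 10) != 0)
 *		err(1, "setpriority");
 */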

/*
 * Set "nice" for a (whole) process.
 */
static int
donice(struct thread *td, struct proc *p, int n)
{
	int error;

	PROC_LOCK_ASSERT(p, MA_OWNED);
	if ((error = p_cansched(td, p)))
		return (error);
	if (n > PRIO_MAX)
		n = PRIO_MAX;
	if (n < PRIO_MIN)
		n = PRIO_MIN;
	if (n < p->p_nice && priv_check(td, PRIV_SCHED_SETPRIORITY) != 0)
		return (EACCES);
	sched_nice(p, n);
	return (0);
}

/*
 * Set realtime priority for LWP.
 */
#ifndef _SYS_SYSPROTO_H_
struct rtprio_thread_args {
	int		function;
	lwpid_t		lwpid;
	struct rtprio	*rtp;
};
#endif
int
sys_rtprio_thread(struct thread *td, struct rtprio_thread_args *uap)
{
	struct proc *p;
	struct rtprio rtp;
	struct thread *td1;
	int cierror, error;

	/* Perform copyin before acquiring locks if needed. */
	if (uap->function == RTP_SET)
		cierror = copyin(uap->rtp, &rtp, sizeof(struct rtprio));
	else
		cierror = 0;

	if (uap->lwpid == 0 || uap->lwpid == td->td_tid) {
		p = td->td_proc;
		td1 = td;
		PROC_LOCK(p);
	} else {
		/* Only look up thread in current process */
		td1 = tdfind(uap->lwpid, curproc->p_pid);
		if (td1 == NULL)
			return (ESRCH);
		p = td1->td_proc;
	}

	switch (uap->function) {
	case RTP_LOOKUP:
		if ((error = p_cansee(td, p)))
			break;
		pri_to_rtp(td1, &rtp);
		PROC_UNLOCK(p);
		return (copyout(&rtp, uap->rtp, sizeof(struct rtprio)));
	case RTP_SET:
		if ((error = p_cansched(td, p)) || (error = cierror))
			break;

		/* Disallow setting rtprio in most cases if not superuser. */
/*
 * Realtime priority has to be restricted for reasons which should be
 * obvious.  However, for idle priority, there is a potential for
 * system deadlock if an idleprio process gains a lock on a resource
 * that other processes need (and the idleprio process can't run
 * due to a CPU-bound normal process).  Fix me!  XXX
 */
#if 0
		if (RTP_PRIO_IS_REALTIME(rtp.type)) {
#else
		if (rtp.type != RTP_PRIO_NORMAL) {
#endif
			error = priv_check(td, PRIV_SCHED_RTPRIO);
			if (error)
				break;
		}
		error = rtp_to_pri(&rtp, td1);
		break;
	default:
		error = EINVAL;
		break;
	}
	PROC_UNLOCK(p);
	return (error);
}

/*
 * Set realtime priority.
 */
#ifndef _SYS_SYSPROTO_H_
struct rtprio_args {
	int		function;
	pid_t		pid;
	struct rtprio	*rtp;
};
#endif
int
sys_rtprio(td, uap)
	struct thread *td;		/* curthread */
	register struct rtprio_args *uap;
{
	struct proc *p;
	struct thread *tdp;
	struct rtprio rtp;
	int cierror, error;

	/* Perform copyin before acquiring locks if needed. */
	if (uap->function == RTP_SET)
		cierror = copyin(uap->rtp, &rtp, sizeof(struct rtprio));
	else
		cierror = 0;

	if (uap->pid == 0) {
		p = td->td_proc;
		PROC_LOCK(p);
	} else {
		p = pfind(uap->pid);
		if (p == NULL)
			return (ESRCH);
	}

	switch (uap->function) {
	case RTP_LOOKUP:
		if ((error = p_cansee(td, p)))
			break;
		/*
		 * Return OUR priority if no pid specified,
		 * or if one is, report the highest priority
		 * in the process.  There isn't much more you can do as
		 * there is only room to return a single priority.
		 * Note: specifying our own pid is not the same
		 * as leaving it zero.
		 */
		if (uap->pid == 0) {
			pri_to_rtp(td, &rtp);
		} else {
			struct rtprio rtp2;

			rtp.type = RTP_PRIO_IDLE;
			rtp.prio = RTP_PRIO_MAX;
			FOREACH_THREAD_IN_PROC(p, tdp) {
				pri_to_rtp(tdp, &rtp2);
				if (rtp2.type <  rtp.type ||
				    (rtp2.type == rtp.type &&
				    rtp2.prio < rtp.prio)) {
					rtp.type = rtp2.type;
					rtp.prio = rtp2.prio;
				}
			}
		}
		PROC_UNLOCK(p);
		return (copyout(&rtp, uap->rtp, sizeof(struct rtprio)));
	case RTP_SET:
		if ((error = p_cansched(td, p)) || (error = cierror))
			break;

		/* Disallow setting rtprio in most cases if not superuser. */
/*
 * Realtime priority has to be restricted for reasons which should be
 * obvious.  However, for idle priority, there is a potential for
 * system deadlock if an idleprio process gains a lock on a resource
 * that other processes need (and the idleprio process can't run
 * due to a CPU-bound normal process).  Fix me!  XXX
 */
#if 0
		if (RTP_PRIO_IS_REALTIME(rtp.type)) {
#else
		if (rtp.type != RTP_PRIO_NORMAL) {
#endif
			error = priv_check(td, PRIV_SCHED_RTPRIO);
			if (error)
				break;
		}

		/*
		 * If we are setting our own priority, set just our
		 * thread; but if we are acting on another process,
		 * set all of the threads in that process.  Note that
		 * specifying our own pid (rather than 0) takes the
		 * latter path as well.
		 */
		if (uap->pid == 0) {
			error = rtp_to_pri(&rtp, td);
		} else {
			FOREACH_THREAD_IN_PROC(p, td) {
				if ((error = rtp_to_pri(&rtp, td)) != 0)
					break;
			}
		}
		break;
	default:
		error = EINVAL;
		break;
	}
	PROC_UNLOCK(p);
	return (error);
}
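
/*
 * Usage sketch (illustrative only): put the current process into the idle
 * scheduling class at the lowest idle priority.  Anything other than
 * RTP_PRIO_NORMAL requires PRIV_SCHED_RTPRIO, as enforced above.
 *
 *	#include <sys/rtprio.h>
 *	#include <err.h>
 *
 *	struct rtprio rtp;
 *
 *	rtp.type = RTP_PRIO_IDLE;
 *	rtp.prio = RTP_PRIO_MAX;
 *	if (rtprio(RTP_SET, 0, &rtp) != 0)
 *		err(1, "rtprio");
 */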

int
rtp_to_pri(struct rtprio *rtp, struct thread *td)
{
	u_char	newpri;
	u_char	oldpri;

	switch (RTP_PRIO_BASE(rtp->type)) {
	case RTP_PRIO_REALTIME:
		if (rtp->prio > RTP_PRIO_MAX)
			return (EINVAL);
		newpri = PRI_MIN_REALTIME + rtp->prio;
		break;
	case RTP_PRIO_NORMAL:
		if (rtp->prio > (PRI_MAX_TIMESHARE - PRI_MIN_TIMESHARE))
			return (EINVAL);
		newpri = PRI_MIN_TIMESHARE + rtp->prio;
		break;
	case RTP_PRIO_IDLE:
		if (rtp->prio > RTP_PRIO_MAX)
			return (EINVAL);
		newpri = PRI_MIN_IDLE + rtp->prio;
		break;
	default:
		return (EINVAL);
	}

	thread_lock(td);
	sched_class(td, rtp->type);	/* XXX fix */
	oldpri = td->td_user_pri;
	sched_user_prio(td, newpri);
	if (td->td_user_pri != oldpri && (td == curthread ||
	    td->td_priority == oldpri || td->td_user_pri >= PRI_MAX_REALTIME))
		sched_prio(td, td->td_user_pri);
	if (TD_ON_UPILOCK(td) && oldpri != newpri) {
		critical_enter();
		thread_unlock(td);
		umtx_pi_adjust(td, oldpri);
		critical_exit();
	} else
		thread_unlock(td);
	return (0);
}
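
/*
 * For example, under the mapping above an rtprio of { RTP_PRIO_REALTIME, 0 }
 * yields a user priority of PRI_MIN_REALTIME, { RTP_PRIO_NORMAL, 0 } yields
 * PRI_MIN_TIMESHARE, and { RTP_PRIO_IDLE, RTP_PRIO_MAX } yields
 * PRI_MIN_IDLE + RTP_PRIO_MAX.  pri_to_rtp() below performs the inverse
 * translation from a thread's base user priority back to an rtprio.
 */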

void
pri_to_rtp(struct thread *td, struct rtprio *rtp)
{

	thread_lock(td);
	switch (PRI_BASE(td->td_pri_class)) {
	case PRI_REALTIME:
		rtp->prio = td->td_base_user_pri - PRI_MIN_REALTIME;
		break;
	case PRI_TIMESHARE:
		rtp->prio = td->td_base_user_pri - PRI_MIN_TIMESHARE;
		break;
	case PRI_IDLE:
		rtp->prio = td->td_base_user_pri - PRI_MIN_IDLE;
		break;
	default:
		break;
	}
	rtp->type = td->td_pri_class;
	thread_unlock(td);
}

#if defined(COMPAT_43)
#ifndef _SYS_SYSPROTO_H_
struct osetrlimit_args {
	u_int	which;
	struct	orlimit *rlp;
};
#endif
int
osetrlimit(td, uap)
	struct thread *td;
	register struct osetrlimit_args *uap;
{
	struct orlimit olim;
	struct rlimit lim;
	int error;

	if ((error = copyin(uap->rlp, &olim, sizeof(struct orlimit))))
		return (error);
	lim.rlim_cur = olim.rlim_cur;
	lim.rlim_max = olim.rlim_max;
	error = kern_setrlimit(td, uap->which, &lim);
	return (error);
}

#ifndef _SYS_SYSPROTO_H_
struct ogetrlimit_args {
	u_int	which;
	struct	orlimit *rlp;
};
#endif
int
ogetrlimit(td, uap)
	struct thread *td;
	register struct ogetrlimit_args *uap;
{
	struct orlimit olim;
	struct rlimit rl;
	struct proc *p;
	int error;

	if (uap->which >= RLIM_NLIMITS)
		return (EINVAL);
	p = td->td_proc;
	PROC_LOCK(p);
	lim_rlimit(p, uap->which, &rl);
	PROC_UNLOCK(p);

	/*
	 * XXX would be more correct to convert only RLIM_INFINITY to the
	 * old RLIM_INFINITY and fail with EOVERFLOW for other larger
	 * values.  Most 64->32 and 32->16 conversions, including not
	 * unimportant ones of uids are even more broken than what we
	 * do here (they blindly truncate).  We don't do this correctly
	 * here since we have little experience with EOVERFLOW yet.
	 * Elsewhere, getuid() can't fail...
	 */
	olim.rlim_cur = rl.rlim_cur > 0x7fffffff ? 0x7fffffff : rl.rlim_cur;
	olim.rlim_max = rl.rlim_max > 0x7fffffff ? 0x7fffffff : rl.rlim_max;
	error = copyout(&olim, uap->rlp, sizeof(olim));
	return (error);
}
#endif /* COMPAT_43 */

#ifndef _SYS_SYSPROTO_H_
struct __setrlimit_args {
	u_int	which;
	struct	rlimit *rlp;
};
#endif
int
sys_setrlimit(td, uap)
	struct thread *td;
	register struct __setrlimit_args *uap;
{
	struct rlimit alim;
	int error;

	if ((error = copyin(uap->rlp, &alim, sizeof(struct rlimit))))
		return (error);
	error = kern_setrlimit(td, uap->which, &alim);
	return (error);
}
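
/*
 * Usage sketch (illustrative only): raise the soft data-size limit to the
 * hard limit.  Raising the hard limit itself would require
 * PRIV_PROC_SETRLIMIT, as checked in kern_setrlimit() below.
 *
 *	#include <sys/resource.h>
 *	#include <err.h>
 *
 *	struct rlimit rl;
 *
 *	if (getrlimit(RLIMIT_DATA, &rl) != 0)
 *		err(1, "getrlimit");
 *	rl.rlim_cur = rl.rlim_max;
 *	if (setrlimit(RLIMIT_DATA, &rl) != 0)
 *		err(1, "setrlimit");
 */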

static void
lim_cb(void *arg)
{
	struct rlimit rlim;
	struct thread *td;
	struct proc *p;

	p = arg;
	PROC_LOCK_ASSERT(p, MA_OWNED);
	/*
	 * Check if the process exceeds its cpu resource allocation.  If
	 * it reaches the max, arrange to kill the process in ast().
	 */
	if (p->p_cpulimit == RLIM_INFINITY)
		return;
	PROC_SLOCK(p);
	FOREACH_THREAD_IN_PROC(p, td) {
		ruxagg(p, td);
	}
	PROC_SUNLOCK(p);
	if (p->p_rux.rux_runtime > p->p_cpulimit * cpu_tickrate()) {
		lim_rlimit(p, RLIMIT_CPU, &rlim);
		if (p->p_rux.rux_runtime >= rlim.rlim_max * cpu_tickrate()) {
			killproc(p, "exceeded maximum CPU limit");
		} else {
			if (p->p_cpulimit < rlim.rlim_max)
				p->p_cpulimit += 5;
			kern_psignal(p, SIGXCPU);
		}
	}
	if ((p->p_flag & P_WEXIT) == 0)
		callout_reset(&p->p_limco, hz, lim_cb, p);
}

int
kern_setrlimit(td, which, limp)
	struct thread *td;
	u_int which;
	struct rlimit *limp;
{
	struct plimit *newlim, *oldlim;
	struct proc *p;
	register struct rlimit *alimp;
	struct rlimit oldssiz;
	int error;

	if (which >= RLIM_NLIMITS)
		return (EINVAL);

	/*
	 * Preserve historical bugs by treating negative limits as unsigned.
	 */
	if (limp->rlim_cur < 0)
		limp->rlim_cur = RLIM_INFINITY;
	if (limp->rlim_max < 0)
		limp->rlim_max = RLIM_INFINITY;

	oldssiz.rlim_cur = 0;
	p = td->td_proc;
	newlim = lim_alloc();
	PROC_LOCK(p);
	oldlim = p->p_limit;
	alimp = &oldlim->pl_rlimit[which];
	if (limp->rlim_cur > alimp->rlim_max ||
	    limp->rlim_max > alimp->rlim_max)
		if ((error = priv_check(td, PRIV_PROC_SETRLIMIT))) {
			PROC_UNLOCK(p);
			lim_free(newlim);
			return (error);
		}
	if (limp->rlim_cur > limp->rlim_max)
		limp->rlim_cur = limp->rlim_max;
	lim_copy(newlim, oldlim);
	alimp = &newlim->pl_rlimit[which];

	switch (which) {

	case RLIMIT_CPU:
		if (limp->rlim_cur != RLIM_INFINITY &&
		    p->p_cpulimit == RLIM_INFINITY)
			callout_reset(&p->p_limco, hz, lim_cb, p);
		p->p_cpulimit = limp->rlim_cur;
		break;
	case RLIMIT_DATA:
		if (limp->rlim_cur > maxdsiz)
			limp->rlim_cur = maxdsiz;
		if (limp->rlim_max > maxdsiz)
			limp->rlim_max = maxdsiz;
		break;

	case RLIMIT_STACK:
		if (limp->rlim_cur > maxssiz)
			limp->rlim_cur = maxssiz;
		if (limp->rlim_max > maxssiz)
			limp->rlim_max = maxssiz;
		oldssiz = *alimp;
		if (p->p_sysent->sv_fixlimit != NULL)
			p->p_sysent->sv_fixlimit(&oldssiz,
			    RLIMIT_STACK);
		break;

	case RLIMIT_NOFILE:
		if (limp->rlim_cur > maxfilesperproc)
			limp->rlim_cur = maxfilesperproc;
		if (limp->rlim_max > maxfilesperproc)
			limp->rlim_max = maxfilesperproc;
		break;

	case RLIMIT_NPROC:
		if (limp->rlim_cur > maxprocperuid)
			limp->rlim_cur = maxprocperuid;
		if (limp->rlim_max > maxprocperuid)
			limp->rlim_max = maxprocperuid;
		if (limp->rlim_cur < 1)
			limp->rlim_cur = 1;
		if (limp->rlim_max < 1)
			limp->rlim_max = 1;
		break;
	}
	if (p->p_sysent->sv_fixlimit != NULL)
		p->p_sysent->sv_fixlimit(limp, which);
	*alimp = *limp;
	p->p_limit = newlim;
	PROC_UNLOCK(p);
	lim_free(oldlim);

	if (which == RLIMIT_STACK) {
		/*
		 * Stack is allocated to the max at exec time with only
		 * "rlim_cur" bytes accessible.  If stack limit is going
		 * up make more accessible, if going down make inaccessible.
		 */
		if (limp->rlim_cur != oldssiz.rlim_cur) {
			vm_offset_t addr;
			vm_size_t size;
			vm_prot_t prot;

			if (limp->rlim_cur > oldssiz.rlim_cur) {
				prot = p->p_sysent->sv_stackprot;
				size = limp->rlim_cur - oldssiz.rlim_cur;
				addr = p->p_sysent->sv_usrstack -
				    limp->rlim_cur;
			} else {
				prot = VM_PROT_NONE;
				size = oldssiz.rlim_cur - limp->rlim_cur;
				addr = p->p_sysent->sv_usrstack -
				    oldssiz.rlim_cur;
			}
			addr = trunc_page(addr);
			size = round_page(size);
			(void)vm_map_protect(&p->p_vmspace->vm_map,
			    addr, addr + size, prot, FALSE);
		}
	}

	return (0);
}
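
/*
 * Worked example of the RLIMIT_STACK adjustment above: if the soft limit
 * grows from 8MB to 64MB, the newly accessible range is the 56MB region
 * [sv_usrstack - 64MB, sv_usrstack - 8MB), which is granted sv_stackprot;
 * shrinking the limit instead remaps the corresponding region VM_PROT_NONE.
 */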

#ifndef _SYS_SYSPROTO_H_
struct __getrlimit_args {
	u_int	which;
	struct	rlimit *rlp;
};
#endif
/* ARGSUSED */
int
sys_getrlimit(td, uap)
	struct thread *td;
	register struct __getrlimit_args *uap;
{
	struct rlimit rlim;
	struct proc *p;
	int error;

	if (uap->which >= RLIM_NLIMITS)
		return (EINVAL);
	p = td->td_proc;
	PROC_LOCK(p);
	lim_rlimit(p, uap->which, &rlim);
	PROC_UNLOCK(p);
	error = copyout(&rlim, uap->rlp, sizeof(struct rlimit));
	return (error);
}

/*
 * Transform the running time and tick information for children of proc p
 * into user and system time usage.
 */
void
calccru(p, up, sp)
	struct proc *p;
	struct timeval *up;
	struct timeval *sp;
{

	PROC_LOCK_ASSERT(p, MA_OWNED);
	calcru1(p, &p->p_crux, up, sp);
}

/*
 * Transform the running time and tick information in proc p into user
 * and system time usage.  If appropriate, include the current time slice
 * on this CPU.
 */
void
calcru(struct proc *p, struct timeval *up, struct timeval *sp)
{
	struct thread *td;
	uint64_t runtime, u;

	PROC_LOCK_ASSERT(p, MA_OWNED);
	PROC_SLOCK_ASSERT(p, MA_OWNED);
	/*
	 * If we are getting stats for the current process, then add in the
	 * stats that this thread has accumulated in its current time slice.
	 * We reset the thread and CPU state as if we had performed a context
	 * switch right here.
	 */
	td = curthread;
	if (td->td_proc == p) {
		u = cpu_ticks();
		runtime = u - PCPU_GET(switchtime);
		td->td_runtime += runtime;
		td->td_incruntime += runtime;
		PCPU_SET(switchtime, u);
	}
	/* Make sure the per-thread stats are current. */
	FOREACH_THREAD_IN_PROC(p, td) {
		if (td->td_incruntime == 0)
			continue;
		ruxagg(p, td);
	}
	calcru1(p, &p->p_rux, up, sp);
}

/* Collect resource usage for a single thread. */
void
rufetchtd(struct thread *td, struct rusage *ru)
{
	struct proc *p;
	uint64_t runtime, u;

	p = td->td_proc;
	PROC_SLOCK_ASSERT(p, MA_OWNED);
	THREAD_LOCK_ASSERT(td, MA_OWNED);
	/*
	 * If we are getting stats for the current thread, then add in the
	 * stats that this thread has accumulated in its current time slice.
	 * We reset the thread and CPU state as if we had performed a context
	 * switch right here.
	 */
	if (td == curthread) {
		u = cpu_ticks();
		runtime = u - PCPU_GET(switchtime);
		td->td_runtime += runtime;
		td->td_incruntime += runtime;
		PCPU_SET(switchtime, u);
	}
	ruxagg(p, td);
	*ru = td->td_ru;
	calcru1(p, &td->td_rux, &ru->ru_utime, &ru->ru_stime);
}

static void
calcru1(struct proc *p, struct rusage_ext *ruxp, struct timeval *up,
    struct timeval *sp)
{
	/* {user, system, interrupt, total} {ticks, usec}: */
	uint64_t ut, uu, st, su, it, tt, tu;

	ut = ruxp->rux_uticks;
	st = ruxp->rux_sticks;
	it = ruxp->rux_iticks;
	tt = ut + st + it;
	if (tt == 0) {
		/* Avoid divide by zero */
		st = 1;
		tt = 1;
	}
	tu = cputick2usec(ruxp->rux_runtime);
	if ((int64_t)tu < 0) {
		/* XXX: this should be an assert /phk */
		printf("calcru: negative runtime of %jd usec for pid %d (%s)\n",
		    (intmax_t)tu, p->p_pid, p->p_comm);
		tu = ruxp->rux_tu;
	}

	if (tu >= ruxp->rux_tu) {
		/*
		 * The normal case, time increased.
		 * Enforce monotonicity of bucketed numbers.
		 */
		uu = (tu * ut) / tt;
		if (uu < ruxp->rux_uu)
			uu = ruxp->rux_uu;
		su = (tu * st) / tt;
		if (su < ruxp->rux_su)
			su = ruxp->rux_su;
	} else if (tu + 3 > ruxp->rux_tu || 101 * tu > 100 * ruxp->rux_tu) {
		/*
		 * When we calibrate the cputicker, it is not uncommon to
		 * see the presumably fixed frequency increase slightly over
		 * time as a result of thermal stabilization and NTP
		 * discipline (of the reference clock).  We therefore ignore
		 * a bit of backwards slop because we expect to catch up
		 * shortly.  We use a 3 microsecond limit to catch low
		 * counts and a 1% limit for high counts.
		 */
		uu = ruxp->rux_uu;
		su = ruxp->rux_su;
		tu = ruxp->rux_tu;
	} else { /* tu < ruxp->rux_tu */
		/*
		 * What happened here was likely that a laptop, which ran at
		 * a reduced clock frequency at boot, kicked into high gear.
		 * The wisdom of spamming this message in that case is
		 * dubious, but it might also be indicative of something
		 * serious, so let's keep it and hope laptops can be made
		 * more truthful about their CPU speed via ACPI.
		 */
		printf("calcru: runtime went backwards from %ju usec "
		    "to %ju usec for pid %d (%s)\n",
		    (uintmax_t)ruxp->rux_tu, (uintmax_t)tu,
		    p->p_pid, p->p_comm);
		uu = (tu * ut) / tt;
		su = (tu * st) / tt;
	}

	ruxp->rux_uu = uu;
	ruxp->rux_su = su;
	ruxp->rux_tu = tu;

	up->tv_sec = uu / 1000000;
	up->tv_usec = uu % 1000000;
	sp->tv_sec = su / 1000000;
	sp->tv_usec = su % 1000000;
}
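
/*
 * Worked example of the split in calcru1(): with rux_uticks = 3,
 * rux_sticks = 1, rux_iticks = 0 and a total runtime of 1000000 usec,
 * tt = 4, so user time becomes (1000000 * 3) / 4 = 750000 usec and system
 * time (1000000 * 1) / 4 = 250000 usec; any interrupt ticks would claim
 * their proportional share of the total without being reported separately.
 */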

#ifndef _SYS_SYSPROTO_H_
struct getrusage_args {
	int	who;
	struct	rusage *rusage;
};
#endif
int
sys_getrusage(td, uap)
	register struct thread *td;
	register struct getrusage_args *uap;
{
	struct rusage ru;
	int error;

	error = kern_getrusage(td, uap->who, &ru);
	if (error == 0)
		error = copyout(&ru, uap->rusage, sizeof(struct rusage));
	return (error);
}

int
kern_getrusage(struct thread *td, int who, struct rusage *rup)
{
	struct proc *p;
	int error;

	error = 0;
	p = td->td_proc;
	PROC_LOCK(p);
	switch (who) {
	case RUSAGE_SELF:
		rufetchcalc(p, rup, &rup->ru_utime,
		    &rup->ru_stime);
		break;

	case RUSAGE_CHILDREN:
		*rup = p->p_stats->p_cru;
		calccru(p, &rup->ru_utime, &rup->ru_stime);
		break;

	case RUSAGE_THREAD:
		PROC_SLOCK(p);
		thread_lock(td);
		rufetchtd(td, rup);
		thread_unlock(td);
		PROC_SUNLOCK(p);
		break;

	default:
		error = EINVAL;
	}
	PROC_UNLOCK(p);
	return (error);
}
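
/*
 * Usage sketch (illustrative only): print the user CPU time consumed by
 * the calling process so far.
 *
 *	#include <sys/resource.h>
 *	#include <err.h>
 *	#include <stdio.h>
 *
 *	struct rusage ru;
 *
 *	if (getrusage(RUSAGE_SELF, &ru) != 0)
 *		err(1, "getrusage");
 *	printf("user time: %jd.%06ld s\n",
 *	    (intmax_t)ru.ru_utime.tv_sec, ru.ru_utime.tv_usec);
 */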

void
rucollect(struct rusage *ru, struct rusage *ru2)
{
	long *ip, *ip2;
	int i;

	if (ru->ru_maxrss < ru2->ru_maxrss)
		ru->ru_maxrss = ru2->ru_maxrss;
	ip = &ru->ru_first;
	ip2 = &ru2->ru_first;
	for (i = &ru->ru_last - &ru->ru_first; i >= 0; i--)
		*ip++ += *ip2++;
}

void
ruadd(struct rusage *ru, struct rusage_ext *rux, struct rusage *ru2,
    struct rusage_ext *rux2)
{

	rux->rux_runtime += rux2->rux_runtime;
	rux->rux_uticks += rux2->rux_uticks;
	rux->rux_sticks += rux2->rux_sticks;
	rux->rux_iticks += rux2->rux_iticks;
	rux->rux_uu += rux2->rux_uu;
	rux->rux_su += rux2->rux_su;
	rux->rux_tu += rux2->rux_tu;
	rucollect(ru, ru2);
}

/*
 * Aggregate tick counts into the proc's rusage_ext.
 */
static void
ruxagg_locked(struct rusage_ext *rux, struct thread *td)
{

	THREAD_LOCK_ASSERT(td, MA_OWNED);
	PROC_SLOCK_ASSERT(td->td_proc, MA_OWNED);
	rux->rux_runtime += td->td_incruntime;
	rux->rux_uticks += td->td_uticks;
	rux->rux_sticks += td->td_sticks;
	rux->rux_iticks += td->td_iticks;
}

void
ruxagg(struct proc *p, struct thread *td)
{

	thread_lock(td);
	ruxagg_locked(&p->p_rux, td);
	ruxagg_locked(&td->td_rux, td);
	td->td_incruntime = 0;
	td->td_uticks = 0;
	td->td_iticks = 0;
	td->td_sticks = 0;
	thread_unlock(td);
}

/*
 * Update the rusage_ext structure and fetch a valid aggregate rusage
 * for proc p if storage for one is supplied.
 */
void
rufetch(struct proc *p, struct rusage *ru)
{
	struct thread *td;

	PROC_SLOCK_ASSERT(p, MA_OWNED);

	*ru = p->p_ru;
	if (p->p_numthreads > 0)  {
		FOREACH_THREAD_IN_PROC(p, td) {
			ruxagg(p, td);
			rucollect(ru, &td->td_ru);
		}
	}
}

/*
 * Atomically perform a rufetch and a calcru together.
 * Consumers can safely assume that calcru() is executed only after
 * rufetch() has completed.
 */
void
rufetchcalc(struct proc *p, struct rusage *ru, struct timeval *up,
    struct timeval *sp)
{

	PROC_SLOCK(p);
	rufetch(p, ru);
	calcru(p, up, sp);
	PROC_SUNLOCK(p);
}

/*
 * Allocate a new resource limits structure and initialize its
 * reference count.
 */
struct plimit *
lim_alloc()
{
	struct plimit *limp;

	limp = malloc(sizeof(struct plimit), M_PLIMIT, M_WAITOK);
	refcount_init(&limp->pl_refcnt, 1);
	return (limp);
}

struct plimit *
lim_hold(limp)
	struct plimit *limp;
{

	refcount_acquire(&limp->pl_refcnt);
	return (limp);
}

void
lim_fork(struct proc *p1, struct proc *p2)
{

	PROC_LOCK_ASSERT(p1, MA_OWNED);
	PROC_LOCK_ASSERT(p2, MA_OWNED);

	p2->p_limit = lim_hold(p1->p_limit);
	callout_init_mtx(&p2->p_limco, &p2->p_mtx, 0);
	if (p1->p_cpulimit != RLIM_INFINITY)
		callout_reset(&p2->p_limco, hz, lim_cb, p2);
}

void
lim_free(limp)
	struct plimit *limp;
{

	KASSERT(limp->pl_refcnt > 0, ("plimit refcnt underflow"));
	if (refcount_release(&limp->pl_refcnt))
		free((void *)limp, M_PLIMIT);
}

/*
 * Make a copy of the plimit structure.
 * We share these structures copy-on-write after fork.
 */
void
lim_copy(dst, src)
	struct plimit *dst, *src;
{

	KASSERT(dst->pl_refcnt == 1, ("lim_copy to shared limit"));
	bcopy(src->pl_rlimit, dst->pl_rlimit, sizeof(src->pl_rlimit));
}

/*
 * Return the hard limit for a particular system resource.  The
 * which parameter specifies the index into the rlimit array.
 */
rlim_t
lim_max(struct proc *p, int which)
{
	struct rlimit rl;

	lim_rlimit(p, which, &rl);
	return (rl.rlim_max);
}

/*
 * Return the current (soft) limit for a particular system resource.
 * The which parameter specifies the index into the rlimit array.
 */
rlim_t
lim_cur(struct proc *p, int which)
{
	struct rlimit rl;

	lim_rlimit(p, which, &rl);
	return (rl.rlim_cur);
}
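
/*
 * Usage sketch (illustrative only, in-kernel): read the soft open-file
 * limit of a process.  lim_rlimit() below asserts that the process lock
 * is held, so callers of lim_cur()/lim_max() hold it across the call:
 *
 *	rlim_t nofile;
 *
 *	PROC_LOCK(p);
 *	nofile = lim_cur(p, RLIMIT_NOFILE);
 *	PROC_UNLOCK(p);
 */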

/*
 * Return a copy of the entire rlimit structure for the system limit
 * specified by 'which' in the rlimit structure pointed to by 'rlp'.
 */
void
lim_rlimit(struct proc *p, int which, struct rlimit *rlp)
{

	PROC_LOCK_ASSERT(p, MA_OWNED);
	KASSERT(which >= 0 && which < RLIM_NLIMITS,
	    ("request for invalid resource limit"));
	*rlp = p->p_limit->pl_rlimit[which];
	if (p->p_sysent->sv_fixlimit != NULL)
		p->p_sysent->sv_fixlimit(rlp, which);
}

void
uihashinit()
{

	uihashtbl = hashinit(maxproc / 16, M_UIDINFO, &uihash);
	rw_init(&uihashtbl_lock, "uidinfo hash");
}

/*
 * Look up a uidinfo struct for the parameter uid.
 * uihashtbl_lock must be locked.
 */
static struct uidinfo *
uilookup(uid)
	uid_t uid;
{
	struct uihashhead *uipp;
	struct uidinfo *uip;

	rw_assert(&uihashtbl_lock, RA_LOCKED);
	uipp = UIHASH(uid);
	LIST_FOREACH(uip, uipp, ui_hash)
		if (uip->ui_uid == uid)
			break;

	return (uip);
}

/*
 * Find or allocate a struct uidinfo for a particular uid.
 * Increase refcount on uidinfo struct returned.
 * uifree() should be called on a struct uidinfo when released.
 */
struct uidinfo *
uifind(uid)
	uid_t uid;
{
	struct uidinfo *old_uip, *uip;

	rw_rlock(&uihashtbl_lock);
	uip = uilookup(uid);
	if (uip == NULL) {
		rw_runlock(&uihashtbl_lock);
		uip = malloc(sizeof(*uip), M_UIDINFO, M_WAITOK | M_ZERO);
		racct_create(&uip->ui_racct);
		rw_wlock(&uihashtbl_lock);
		/*
		 * There's a chance someone created our uidinfo while we
		 * were in malloc and not holding the lock, so we have to
		 * make sure we don't insert a duplicate uidinfo.
		 */
		if ((old_uip = uilookup(uid)) != NULL) {
			/* Someone else beat us to it. */
			racct_destroy(&uip->ui_racct);
			free(uip, M_UIDINFO);
			uip = old_uip;
		} else {
			refcount_init(&uip->ui_ref, 0);
			uip->ui_uid = uid;
			mtx_init(&uip->ui_vmsize_mtx, "ui_vmsize", NULL,
			    MTX_DEF);
			LIST_INSERT_HEAD(UIHASH(uid), uip, ui_hash);
		}
	}
	uihold(uip);
	rw_unlock(&uihashtbl_lock);
	return (uip);
}

/*
 * Place another refcount on a uidinfo struct.
 */
void
uihold(uip)
	struct uidinfo *uip;
{

	refcount_acquire(&uip->ui_ref);
}

/*-
 * Since uidinfo structs have a long lifetime, we use an
 * opportunistic refcounting scheme to avoid locking the lookup hash
 * for each release.
 *
 * If the refcount hits 0, we need to free the structure,
 * which means we need to lock the hash.
 * Optimal case:
 *   After locking the struct and lowering the refcount, if we find
 *   that we don't need to free, simply unlock and return.
 * Suboptimal case:
 *   If refcount lowering results in need to free, bump the count
 *   back up, lose the lock and acquire the locks in the proper
 *   order to try again.
 */
void
uifree(uip)
	struct uidinfo *uip;
{
	int old;

	/* Prepare for optimal case. */
	old = uip->ui_ref;
	if (old > 1 && atomic_cmpset_int(&uip->ui_ref, old, old - 1))
		return;

	/* Prepare for suboptimal case. */
	rw_wlock(&uihashtbl_lock);
	if (refcount_release(&uip->ui_ref)) {
		racct_destroy(&uip->ui_racct);
		LIST_REMOVE(uip, ui_hash);
		rw_wunlock(&uihashtbl_lock);
		if (uip->ui_sbsize != 0)
			printf("freeing uidinfo: uid = %d, sbsize = %ld\n",
			    uip->ui_uid, uip->ui_sbsize);
		if (uip->ui_proccnt != 0)
			printf("freeing uidinfo: uid = %d, proccnt = %ld\n",
			    uip->ui_uid, uip->ui_proccnt);
		if (uip->ui_vmsize != 0)
			printf("freeing uidinfo: uid = %d, swapuse = %lld\n",
			    uip->ui_uid, (unsigned long long)uip->ui_vmsize);
		mtx_destroy(&uip->ui_vmsize_mtx);
		free(uip, M_UIDINFO);
		return;
	}
	/*
	 * Someone added a reference between atomic_cmpset_int() and
	 * rw_wlock(&uihashtbl_lock).
	 */
	rw_wunlock(&uihashtbl_lock);
}

void
ui_racct_foreach(void (*callback)(struct racct *racct,
    void *arg2, void *arg3), void *arg2, void *arg3)
{
	struct uidinfo *uip;
	struct uihashhead *uih;

	rw_rlock(&uihashtbl_lock);
	for (uih = &uihashtbl[uihash]; uih >= uihashtbl; uih--) {
		LIST_FOREACH(uip, uih, ui_hash) {
			(callback)(uip->ui_racct, arg2, arg3);
		}
	}
	rw_runlock(&uihashtbl_lock);
}

/*
 * Change the count associated with the number of processes
 * a given user is using.  When 'max' is 0, don't enforce a limit.
 */
int
chgproccnt(uip, diff, max)
	struct	uidinfo	*uip;
	int	diff;
	rlim_t	max;
{

	/* Don't allow them to exceed max, but allow subtraction. */
	if (diff > 0 && max != 0) {
		if (atomic_fetchadd_long(&uip->ui_proccnt, (long)diff) + diff > max) {
			atomic_subtract_long(&uip->ui_proccnt, (long)diff);
			return (0);
		}
	} else {
		atomic_add_long(&uip->ui_proccnt, (long)diff);
		if (uip->ui_proccnt < 0)
			printf("negative proccnt for uid = %d\n", uip->ui_uid);
	}
	return (1);
}
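
/*
 * Usage sketch (illustrative only): roughly how a caller such as fork1()
 * might charge a new process against the real uid's RLIMIT_NPROC, where
 * uip is the creator's real-uid uidinfo, and back the charge out again if
 * the operation fails later on:
 *
 *	if (!chgproccnt(uip, 1, lim_cur(p1, RLIMIT_NPROC)))
 *		return (EAGAIN);
 *	...
 *	(void)chgproccnt(uip, -1, 0);	-- undo the charge on failure
 */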

/*
 * Change the total socket buffer size a user has used.
 */
int
chgsbsize(uip, hiwat, to, max)
	struct	uidinfo	*uip;
	u_int  *hiwat;
	u_int	to;
	rlim_t	max;
{
	int diff;

	diff = to - *hiwat;
	if (diff > 0) {
		if (atomic_fetchadd_long(&uip->ui_sbsize, (long)diff) + diff > max) {
			atomic_subtract_long(&uip->ui_sbsize, (long)diff);
			return (0);
		}
	} else {
		atomic_add_long(&uip->ui_sbsize, (long)diff);
		if (uip->ui_sbsize < 0)
			printf("negative sbsize for uid = %d\n", uip->ui_uid);
	}
	*hiwat = to;
	return (1);
}

/*
 * Change the count associated with the number of pseudo-terminals
 * a given user is using.  When 'max' is 0, don't enforce a limit.
 */
int
chgptscnt(uip, diff, max)
	struct	uidinfo	*uip;
	int	diff;
	rlim_t	max;
{

	/* Don't allow them to exceed max, but allow subtraction. */
	if (diff > 0 && max != 0) {
		if (atomic_fetchadd_long(&uip->ui_ptscnt, (long)diff) + diff > max) {
			atomic_subtract_long(&uip->ui_ptscnt, (long)diff);
			return (0);
		}
	} else {
		atomic_add_long(&uip->ui_ptscnt, (long)diff);
		if (uip->ui_ptscnt < 0)
			printf("negative ptscnt for uid = %d\n", uip->ui_uid);
	}
	return (1);
}
