kern_resource.c revision 330897
1/*-
2 * SPDX-License-Identifier: BSD-3-Clause
3 *
4 * Copyright (c) 1982, 1986, 1991, 1993
5 *	The Regents of the University of California.  All rights reserved.
6 * (c) UNIX System Laboratories, Inc.
7 * All or some portions of this file are derived from material licensed
8 * to the University of California by American Telephone and Telegraph
9 * Co. or Unix System Laboratories, Inc. and are reproduced herein with
10 * the permission of UNIX System Laboratories, Inc.
11 *
12 * Redistribution and use in source and binary forms, with or without
13 * modification, are permitted provided that the following conditions
14 * are met:
15 * 1. Redistributions of source code must retain the above copyright
16 *    notice, this list of conditions and the following disclaimer.
17 * 2. Redistributions in binary form must reproduce the above copyright
18 *    notice, this list of conditions and the following disclaimer in the
19 *    documentation and/or other materials provided with the distribution.
20 * 4. Neither the name of the University nor the names of its contributors
21 *    may be used to endorse or promote products derived from this software
22 *    without specific prior written permission.
23 *
24 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
25 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
26 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
27 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
28 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
29 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
30 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
31 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
32 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
33 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
34 * SUCH DAMAGE.
35 *
36 *	@(#)kern_resource.c	8.5 (Berkeley) 1/21/94
37 */
38
39#include <sys/cdefs.h>
40__FBSDID("$FreeBSD: stable/11/sys/kern/kern_resource.c 330897 2018-03-14 03:19:51Z eadler $");
41
42#include "opt_compat.h"
43
44#include <sys/param.h>
45#include <sys/systm.h>
46#include <sys/sysproto.h>
47#include <sys/file.h>
48#include <sys/kernel.h>
49#include <sys/lock.h>
50#include <sys/malloc.h>
51#include <sys/mutex.h>
52#include <sys/priv.h>
53#include <sys/proc.h>
54#include <sys/refcount.h>
55#include <sys/racct.h>
56#include <sys/resourcevar.h>
57#include <sys/rwlock.h>
58#include <sys/sched.h>
59#include <sys/sx.h>
60#include <sys/syscallsubr.h>
61#include <sys/sysctl.h>
62#include <sys/sysent.h>
63#include <sys/time.h>
64#include <sys/umtx.h>
65
66#include <vm/vm.h>
67#include <vm/vm_param.h>
68#include <vm/pmap.h>
69#include <vm/vm_map.h>
70
71
72static MALLOC_DEFINE(M_PLIMIT, "plimit", "plimit structures");
73static MALLOC_DEFINE(M_UIDINFO, "uidinfo", "uidinfo structures");
74#define	UIHASH(uid)	(&uihashtbl[(uid) & uihash])
75static struct rwlock uihashtbl_lock;
76static LIST_HEAD(uihashhead, uidinfo) *uihashtbl;
77static u_long uihash;		/* size of hash table - 1 */
78
79static void	calcru1(struct proc *p, struct rusage_ext *ruxp,
80		    struct timeval *up, struct timeval *sp);
81static int	donice(struct thread *td, struct proc *chgp, int n);
82static struct uidinfo *uilookup(uid_t uid);
83static void	ruxagg_locked(struct rusage_ext *rux, struct thread *td);
84
85/*
86 * Resource controls and accounting.
87 */
88#ifndef _SYS_SYSPROTO_H_
89struct getpriority_args {
90	int	which;
91	int	who;
92};
93#endif
94int
95sys_getpriority(struct thread *td, struct getpriority_args *uap)
96{
97	struct proc *p;
98	struct pgrp *pg;
99	int error, low;
100
101	error = 0;
102	low = PRIO_MAX + 1;
103	switch (uap->which) {
104
105	case PRIO_PROCESS:
106		if (uap->who == 0)
107			low = td->td_proc->p_nice;
108		else {
109			p = pfind(uap->who);
110			if (p == NULL)
111				break;
112			if (p_cansee(td, p) == 0)
113				low = p->p_nice;
114			PROC_UNLOCK(p);
115		}
116		break;
117
118	case PRIO_PGRP:
119		sx_slock(&proctree_lock);
120		if (uap->who == 0) {
121			pg = td->td_proc->p_pgrp;
122			PGRP_LOCK(pg);
123		} else {
124			pg = pgfind(uap->who);
125			if (pg == NULL) {
126				sx_sunlock(&proctree_lock);
127				break;
128			}
129		}
130		sx_sunlock(&proctree_lock);
131		LIST_FOREACH(p, &pg->pg_members, p_pglist) {
132			PROC_LOCK(p);
133			if (p->p_state == PRS_NORMAL &&
134			    p_cansee(td, p) == 0) {
135				if (p->p_nice < low)
136					low = p->p_nice;
137			}
138			PROC_UNLOCK(p);
139		}
140		PGRP_UNLOCK(pg);
141		break;
142
143	case PRIO_USER:
144		if (uap->who == 0)
145			uap->who = td->td_ucred->cr_uid;
146		sx_slock(&allproc_lock);
147		FOREACH_PROC_IN_SYSTEM(p) {
148			PROC_LOCK(p);
149			if (p->p_state == PRS_NORMAL &&
150			    p_cansee(td, p) == 0 &&
151			    p->p_ucred->cr_uid == uap->who) {
152				if (p->p_nice < low)
153					low = p->p_nice;
154			}
155			PROC_UNLOCK(p);
156		}
157		sx_sunlock(&allproc_lock);
158		break;
159
160	default:
161		error = EINVAL;
162		break;
163	}
164	if (low == PRIO_MAX + 1 && error == 0)
165		error = ESRCH;
166	td->td_retval[0] = low;
167	return (error);
168}
169
170#ifndef _SYS_SYSPROTO_H_
171struct setpriority_args {
172	int	which;
173	int	who;
174	int	prio;
175};
176#endif
177int
178sys_setpriority(struct thread *td, struct setpriority_args *uap)
179{
180	struct proc *curp, *p;
181	struct pgrp *pg;
182	int found = 0, error = 0;
183
184	curp = td->td_proc;
185	switch (uap->which) {
186	case PRIO_PROCESS:
187		if (uap->who == 0) {
188			PROC_LOCK(curp);
189			error = donice(td, curp, uap->prio);
190			PROC_UNLOCK(curp);
191		} else {
192			p = pfind(uap->who);
193			if (p == NULL)
194				break;
195			error = p_cansee(td, p);
196			if (error == 0)
197				error = donice(td, p, uap->prio);
198			PROC_UNLOCK(p);
199		}
200		found++;
201		break;
202
203	case PRIO_PGRP:
204		sx_slock(&proctree_lock);
205		if (uap->who == 0) {
206			pg = curp->p_pgrp;
207			PGRP_LOCK(pg);
208		} else {
209			pg = pgfind(uap->who);
210			if (pg == NULL) {
211				sx_sunlock(&proctree_lock);
212				break;
213			}
214		}
215		sx_sunlock(&proctree_lock);
216		LIST_FOREACH(p, &pg->pg_members, p_pglist) {
217			PROC_LOCK(p);
218			if (p->p_state == PRS_NORMAL &&
219			    p_cansee(td, p) == 0) {
220				error = donice(td, p, uap->prio);
221				found++;
222			}
223			PROC_UNLOCK(p);
224		}
225		PGRP_UNLOCK(pg);
226		break;
227
228	case PRIO_USER:
229		if (uap->who == 0)
230			uap->who = td->td_ucred->cr_uid;
231		sx_slock(&allproc_lock);
232		FOREACH_PROC_IN_SYSTEM(p) {
233			PROC_LOCK(p);
234			if (p->p_state == PRS_NORMAL &&
235			    p->p_ucred->cr_uid == uap->who &&
236			    p_cansee(td, p) == 0) {
237				error = donice(td, p, uap->prio);
238				found++;
239			}
240			PROC_UNLOCK(p);
241		}
242		sx_sunlock(&allproc_lock);
243		break;
244
245	default:
246		error = EINVAL;
247		break;
248	}
249	if (found == 0 && error == 0)
250		error = ESRCH;
251	return (error);
252}
253
254/*
255 * Set "nice" for a (whole) process.
256 */
257static int
258donice(struct thread *td, struct proc *p, int n)
259{
260	int error;
261
262	PROC_LOCK_ASSERT(p, MA_OWNED);
263	if ((error = p_cansched(td, p)))
264		return (error);
265	if (n > PRIO_MAX)
266		n = PRIO_MAX;
267	if (n < PRIO_MIN)
268		n = PRIO_MIN;
269	if (n < p->p_nice && priv_check(td, PRIV_SCHED_SETPRIORITY) != 0)
270		return (EACCES);
271	sched_nice(p, n);
272	return (0);
273}
274
275static int unprivileged_idprio;
276SYSCTL_INT(_security_bsd, OID_AUTO, unprivileged_idprio, CTLFLAG_RW,
277    &unprivileged_idprio, 0, "Allow non-root users to set an idle priority");
278
279/*
280 * Set realtime priority for LWP.
281 */
282#ifndef _SYS_SYSPROTO_H_
283struct rtprio_thread_args {
284	int		function;
285	lwpid_t		lwpid;
286	struct rtprio	*rtp;
287};
288#endif
289int
290sys_rtprio_thread(struct thread *td, struct rtprio_thread_args *uap)
291{
292	struct proc *p;
293	struct rtprio rtp;
294	struct thread *td1;
295	int cierror, error;
296
297	/* Perform copyin before acquiring locks if needed. */
298	if (uap->function == RTP_SET)
299		cierror = copyin(uap->rtp, &rtp, sizeof(struct rtprio));
300	else
301		cierror = 0;
302
303	if (uap->lwpid == 0 || uap->lwpid == td->td_tid) {
304		p = td->td_proc;
305		td1 = td;
306		PROC_LOCK(p);
307	} else {
308		/* Only look up thread in current process */
309		td1 = tdfind(uap->lwpid, curproc->p_pid);
310		if (td1 == NULL)
311			return (ESRCH);
312		p = td1->td_proc;
313	}
314
315	switch (uap->function) {
316	case RTP_LOOKUP:
317		if ((error = p_cansee(td, p)))
318			break;
319		pri_to_rtp(td1, &rtp);
320		PROC_UNLOCK(p);
321		return (copyout(&rtp, uap->rtp, sizeof(struct rtprio)));
322	case RTP_SET:
323		if ((error = p_cansched(td, p)) || (error = cierror))
324			break;
325
326		/* Disallow setting rtprio in most cases if not superuser. */
327
328		/*
329		 * Realtime priority has to be restricted for reasons which
330		 * should be obvious.  However, for idleprio processes, there is
331		 * a potential for system deadlock if an idleprio process gains
332		 * a lock on a resource that other processes need (and the
333		 * idleprio process can't run due to a CPU-bound normal
334		 * process).  Fix me!  XXX
335		 *
336		 * This problem is not unique to idleprio processes, however.
337		 * A user-level program can obtain a file lock and hold it
338		 * indefinitely.  Additionally, without idleprio processes it is
339		 * still conceivable that a program with low priority will never
340		 * get to run.  In short, allowing this feature might make it
341		 * easier to lock a resource indefinitely, but it is not the
342		 * only thing that makes it possible.
343		 */
344		if (RTP_PRIO_BASE(rtp.type) == RTP_PRIO_REALTIME ||
345		    (RTP_PRIO_BASE(rtp.type) == RTP_PRIO_IDLE &&
346		    unprivileged_idprio == 0)) {
347			error = priv_check(td, PRIV_SCHED_RTPRIO);
348			if (error)
349				break;
350		}
351		error = rtp_to_pri(&rtp, td1);
352		break;
353	default:
354		error = EINVAL;
355		break;
356	}
357	PROC_UNLOCK(p);
358	return (error);
359}
360
361/*
362 * Set realtime priority.
363 */
364#ifndef _SYS_SYSPROTO_H_
365struct rtprio_args {
366	int		function;
367	pid_t		pid;
368	struct rtprio	*rtp;
369};
370#endif
371int
372sys_rtprio(struct thread *td, struct rtprio_args *uap)
373{
374	struct proc *p;
375	struct thread *tdp;
376	struct rtprio rtp;
377	int cierror, error;
378
379	/* Perform copyin before acquiring locks if needed. */
380	if (uap->function == RTP_SET)
381		cierror = copyin(uap->rtp, &rtp, sizeof(struct rtprio));
382	else
383		cierror = 0;
384
385	if (uap->pid == 0) {
386		p = td->td_proc;
387		PROC_LOCK(p);
388	} else {
389		p = pfind(uap->pid);
390		if (p == NULL)
391			return (ESRCH);
392	}
393
394	switch (uap->function) {
395	case RTP_LOOKUP:
396		if ((error = p_cansee(td, p)))
397			break;
398		/*
399		 * Return OUR priority if no pid specified,
400		 * or if one is, report the highest priority
401		 * in the process.  There isn't much more you can do as
402		 * there is only room to return a single priority.
403		 * Note: specifying our own pid is not the same
404		 * as leaving it zero.
405		 */
406		if (uap->pid == 0) {
407			pri_to_rtp(td, &rtp);
408		} else {
409			struct rtprio rtp2;
410
411			rtp.type = RTP_PRIO_IDLE;
412			rtp.prio = RTP_PRIO_MAX;
413			FOREACH_THREAD_IN_PROC(p, tdp) {
414				pri_to_rtp(tdp, &rtp2);
415				if (rtp2.type <  rtp.type ||
416				    (rtp2.type == rtp.type &&
417				    rtp2.prio < rtp.prio)) {
418					rtp.type = rtp2.type;
419					rtp.prio = rtp2.prio;
420				}
421			}
422		}
423		PROC_UNLOCK(p);
424		return (copyout(&rtp, uap->rtp, sizeof(struct rtprio)));
425	case RTP_SET:
426		if ((error = p_cansched(td, p)) || (error = cierror))
427			break;
428
429		/*
430		 * Disallow setting rtprio in most cases if not superuser.
431		 * See the comment in sys_rtprio_thread about idprio
432		 * threads holding a lock.
433		 */
434		if (RTP_PRIO_BASE(rtp.type) == RTP_PRIO_REALTIME ||
435		    (RTP_PRIO_BASE(rtp.type) == RTP_PRIO_IDLE &&
436		    !unprivileged_idprio)) {
437			error = priv_check(td, PRIV_SCHED_RTPRIO);
438			if (error)
439				break;
440		}
441
442		/*
443		 * If we are setting our own priority, set just our
444		 * thread; but if we are acting on another process,
445		 * set all the threads in that process.  Specifying
446		 * our own pid takes the latter path.
447		 */
448		if (uap->pid == 0) {
449			error = rtp_to_pri(&rtp, td);
450		} else {
451			FOREACH_THREAD_IN_PROC(p, tdp) {
452				if ((error = rtp_to_pri(&rtp, tdp)) != 0)
453					break;
454			}
455		}
456		break;
457	default:
458		error = EINVAL;
459		break;
460	}
461	PROC_UNLOCK(p);
462	return (error);
463}
464
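/*
 * Convert an rtprio structure into a kernel scheduling priority and
 * apply it to the given thread.  Returns EINVAL for out-of-range values.
 */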
465int
466rtp_to_pri(struct rtprio *rtp, struct thread *td)
467{
468	u_char  newpri, oldclass, oldpri;
469
470	switch (RTP_PRIO_BASE(rtp->type)) {
471	case RTP_PRIO_REALTIME:
472		if (rtp->prio > RTP_PRIO_MAX)
473			return (EINVAL);
474		newpri = PRI_MIN_REALTIME + rtp->prio;
475		break;
476	case RTP_PRIO_NORMAL:
477		if (rtp->prio > (PRI_MAX_TIMESHARE - PRI_MIN_TIMESHARE))
478			return (EINVAL);
479		newpri = PRI_MIN_TIMESHARE + rtp->prio;
480		break;
481	case RTP_PRIO_IDLE:
482		if (rtp->prio > RTP_PRIO_MAX)
483			return (EINVAL);
484		newpri = PRI_MIN_IDLE + rtp->prio;
485		break;
486	default:
487		return (EINVAL);
488	}
489
490	thread_lock(td);
491	oldclass = td->td_pri_class;
492	sched_class(td, rtp->type);	/* XXX fix */
493	oldpri = td->td_user_pri;
494	sched_user_prio(td, newpri);
495	if (td->td_user_pri != oldpri && (oldclass != RTP_PRIO_NORMAL ||
496	    td->td_pri_class != RTP_PRIO_NORMAL))
497		sched_prio(td, td->td_user_pri);
498	if (TD_ON_UPILOCK(td) && oldpri != newpri) {
499		critical_enter();
500		thread_unlock(td);
501		umtx_pi_adjust(td, oldpri);
502		critical_exit();
503	} else
504		thread_unlock(td);
505	return (0);
506}
507
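/*
 * Convert the given thread's scheduling class and base user priority
 * back into rtprio form.
 */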
508void
509pri_to_rtp(struct thread *td, struct rtprio *rtp)
510{
511
512	thread_lock(td);
513	switch (PRI_BASE(td->td_pri_class)) {
514	case PRI_REALTIME:
515		rtp->prio = td->td_base_user_pri - PRI_MIN_REALTIME;
516		break;
517	case PRI_TIMESHARE:
518		rtp->prio = td->td_base_user_pri - PRI_MIN_TIMESHARE;
519		break;
520	case PRI_IDLE:
521		rtp->prio = td->td_base_user_pri - PRI_MIN_IDLE;
522		break;
523	default:
524		break;
525	}
526	rtp->type = td->td_pri_class;
527	thread_unlock(td);
528}
529
530#if defined(COMPAT_43)
531#ifndef _SYS_SYSPROTO_H_
532struct osetrlimit_args {
533	u_int	which;
534	struct	orlimit *rlp;
535};
536#endif
537int
538osetrlimit(struct thread *td, struct osetrlimit_args *uap)
539{
540	struct orlimit olim;
541	struct rlimit lim;
542	int error;
543
544	if ((error = copyin(uap->rlp, &olim, sizeof(struct orlimit))))
545		return (error);
546	lim.rlim_cur = olim.rlim_cur;
547	lim.rlim_max = olim.rlim_max;
548	error = kern_setrlimit(td, uap->which, &lim);
549	return (error);
550}
551
552#ifndef _SYS_SYSPROTO_H_
553struct ogetrlimit_args {
554	u_int	which;
555	struct	orlimit *rlp;
556};
557#endif
558int
559ogetrlimit(struct thread *td, struct ogetrlimit_args *uap)
560{
561	struct orlimit olim;
562	struct rlimit rl;
563	int error;
564
565	if (uap->which >= RLIM_NLIMITS)
566		return (EINVAL);
567	lim_rlimit(td, uap->which, &rl);
568
569	/*
570	 * XXX would be more correct to convert only RLIM_INFINITY to the
571	 * old RLIM_INFINITY and fail with EOVERFLOW for other larger
572	 * values.  Most 64->32 and 32->16 conversions, including such
573	 * not-unimportant ones as uids, are even more broken than what we
574	 * do here (they blindly truncate).  We don't do this correctly
575	 * here since we have little experience with EOVERFLOW yet.
576	 * Elsewhere, getuid() can't fail...
577	 */
578	olim.rlim_cur = rl.rlim_cur > 0x7fffffff ? 0x7fffffff : rl.rlim_cur;
579	olim.rlim_max = rl.rlim_max > 0x7fffffff ? 0x7fffffff : rl.rlim_max;
580	error = copyout(&olim, uap->rlp, sizeof(olim));
581	return (error);
582}
583#endif /* COMPAT_43 */
584
585#ifndef _SYS_SYSPROTO_H_
586struct __setrlimit_args {
587	u_int	which;
588	struct	rlimit *rlp;
589};
590#endif
591int
592sys_setrlimit(struct thread *td, struct __setrlimit_args *uap)
593{
594	struct rlimit alim;
595	int error;
596
597	if ((error = copyin(uap->rlp, &alim, sizeof(struct rlimit))))
598		return (error);
599	error = kern_setrlimit(td, uap->which, &alim);
600	return (error);
601}
602
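/*
 * Callout handler that periodically checks whether the process has
 * exceeded its CPU time limit, sending SIGXCPU or killing it when the
 * hard limit is reached.
 */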
603static void
604lim_cb(void *arg)
605{
606	struct rlimit rlim;
607	struct thread *td;
608	struct proc *p;
609
610	p = arg;
611	PROC_LOCK_ASSERT(p, MA_OWNED);
612	/*
613	 * Check if the process exceeds its cpu resource allocation.  If
614	 * it reaches the max, arrange to kill the process in ast().
615	 */
616	if (p->p_cpulimit == RLIM_INFINITY)
617		return;
618	PROC_STATLOCK(p);
619	FOREACH_THREAD_IN_PROC(p, td) {
620		ruxagg(p, td);
621	}
622	PROC_STATUNLOCK(p);
623	if (p->p_rux.rux_runtime > p->p_cpulimit * cpu_tickrate()) {
624		lim_rlimit_proc(p, RLIMIT_CPU, &rlim);
625		if (p->p_rux.rux_runtime >= rlim.rlim_max * cpu_tickrate()) {
626			killproc(p, "exceeded maximum CPU limit");
627		} else {
628			if (p->p_cpulimit < rlim.rlim_max)
629				p->p_cpulimit += 5;
630			kern_psignal(p, SIGXCPU);
631		}
632	}
633	if ((p->p_flag & P_WEXIT) == 0)
634		callout_reset_sbt(&p->p_limco, SBT_1S, 0,
635		    lim_cb, p, C_PREL(1));
636}
637
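/*
 * Set a resource limit on the current thread's process.
 */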
638int
639kern_setrlimit(struct thread *td, u_int which, struct rlimit *limp)
640{
641
642	return (kern_proc_setrlimit(td, td->td_proc, which, limp));
643}
644
645int
646kern_proc_setrlimit(struct thread *td, struct proc *p, u_int which,
647    struct rlimit *limp)
648{
649	struct plimit *newlim, *oldlim;
650	struct rlimit *alimp;
651	struct rlimit oldssiz;
652	int error;
653
654	if (which >= RLIM_NLIMITS)
655		return (EINVAL);
656
657	/*
658	 * Preserve historical bugs by treating negative limits as unsigned.
659	 */
660	if (limp->rlim_cur < 0)
661		limp->rlim_cur = RLIM_INFINITY;
662	if (limp->rlim_max < 0)
663		limp->rlim_max = RLIM_INFINITY;
664
665	oldssiz.rlim_cur = 0;
666	newlim = lim_alloc();
667	PROC_LOCK(p);
668	oldlim = p->p_limit;
669	alimp = &oldlim->pl_rlimit[which];
670	if (limp->rlim_cur > alimp->rlim_max ||
671	    limp->rlim_max > alimp->rlim_max)
672		if ((error = priv_check(td, PRIV_PROC_SETRLIMIT))) {
673			PROC_UNLOCK(p);
674			lim_free(newlim);
675			return (error);
676		}
677	if (limp->rlim_cur > limp->rlim_max)
678		limp->rlim_cur = limp->rlim_max;
679	lim_copy(newlim, oldlim);
680	alimp = &newlim->pl_rlimit[which];
681
682	switch (which) {
683
684	case RLIMIT_CPU:
685		if (limp->rlim_cur != RLIM_INFINITY &&
686		    p->p_cpulimit == RLIM_INFINITY)
687			callout_reset_sbt(&p->p_limco, SBT_1S, 0,
688			    lim_cb, p, C_PREL(1));
689		p->p_cpulimit = limp->rlim_cur;
690		break;
691	case RLIMIT_DATA:
692		if (limp->rlim_cur > maxdsiz)
693			limp->rlim_cur = maxdsiz;
694		if (limp->rlim_max > maxdsiz)
695			limp->rlim_max = maxdsiz;
696		break;
697
698	case RLIMIT_STACK:
699		if (limp->rlim_cur > maxssiz)
700			limp->rlim_cur = maxssiz;
701		if (limp->rlim_max > maxssiz)
702			limp->rlim_max = maxssiz;
703		oldssiz = *alimp;
704		if (p->p_sysent->sv_fixlimit != NULL)
705			p->p_sysent->sv_fixlimit(&oldssiz,
706			    RLIMIT_STACK);
707		break;
708
709	case RLIMIT_NOFILE:
710		if (limp->rlim_cur > maxfilesperproc)
711			limp->rlim_cur = maxfilesperproc;
712		if (limp->rlim_max > maxfilesperproc)
713			limp->rlim_max = maxfilesperproc;
714		break;
715
716	case RLIMIT_NPROC:
717		if (limp->rlim_cur > maxprocperuid)
718			limp->rlim_cur = maxprocperuid;
719		if (limp->rlim_max > maxprocperuid)
720			limp->rlim_max = maxprocperuid;
721		if (limp->rlim_cur < 1)
722			limp->rlim_cur = 1;
723		if (limp->rlim_max < 1)
724			limp->rlim_max = 1;
725		break;
726	}
727	if (p->p_sysent->sv_fixlimit != NULL)
728		p->p_sysent->sv_fixlimit(limp, which);
729	*alimp = *limp;
730	p->p_limit = newlim;
731	PROC_UPDATE_COW(p);
732	PROC_UNLOCK(p);
733	lim_free(oldlim);
734
735	if (which == RLIMIT_STACK &&
736	    /*
737	     * Skip calls from exec_new_vmspace(), done when stack is
738	     * not mapped yet.
739	     */
740	    (td != curthread || (p->p_flag & P_INEXEC) == 0)) {
741		/*
742		 * Stack is allocated to the max at exec time with only
743		 * "rlim_cur" bytes accessible.  If stack limit is going
744		 * up make more accessible, if going down make inaccessible.
745		 */
746		if (limp->rlim_cur != oldssiz.rlim_cur) {
747			vm_offset_t addr;
748			vm_size_t size;
749			vm_prot_t prot;
750
751			if (limp->rlim_cur > oldssiz.rlim_cur) {
752				prot = p->p_sysent->sv_stackprot;
753				size = limp->rlim_cur - oldssiz.rlim_cur;
754				addr = p->p_sysent->sv_usrstack -
755				    limp->rlim_cur;
756			} else {
757				prot = VM_PROT_NONE;
758				size = oldssiz.rlim_cur - limp->rlim_cur;
759				addr = p->p_sysent->sv_usrstack -
760				    oldssiz.rlim_cur;
761			}
762			addr = trunc_page(addr);
763			size = round_page(size);
764			(void)vm_map_protect(&p->p_vmspace->vm_map,
765			    addr, addr + size, prot, FALSE);
766		}
767	}
768
769	return (0);
770}
771
772#ifndef _SYS_SYSPROTO_H_
773struct __getrlimit_args {
774	u_int	which;
775	struct	rlimit *rlp;
776};
777#endif
778/* ARGSUSED */
779int
780sys_getrlimit(struct thread *td, struct __getrlimit_args *uap)
781{
782	struct rlimit rlim;
783	int error;
784
785	if (uap->which >= RLIM_NLIMITS)
786		return (EINVAL);
787	lim_rlimit(td, uap->which, &rlim);
788	error = copyout(&rlim, uap->rlp, sizeof(struct rlimit));
789	return (error);
790}
791
792/*
793 * Transform the running time and tick information for children of proc p
794 * into user and system time usage.
795 */
796void
797calccru(struct proc *p, struct timeval *up, struct timeval *sp)
798{
799
800	PROC_LOCK_ASSERT(p, MA_OWNED);
801	calcru1(p, &p->p_crux, up, sp);
802}
803
804/*
805 * Transform the running time and tick information in proc p into user
806 * and system time usage.  If appropriate, include the current time slice
807 * on this CPU.
808 */
809void
810calcru(struct proc *p, struct timeval *up, struct timeval *sp)
811{
812	struct thread *td;
813	uint64_t runtime, u;
814
815	PROC_LOCK_ASSERT(p, MA_OWNED);
816	PROC_STATLOCK_ASSERT(p, MA_OWNED);
817	/*
818	 * If we are getting stats for the current process, then add in the
819	 * stats that this thread has accumulated in its current time slice.
820	 * We reset the thread and CPU state as if we had performed a context
821	 * switch right here.
822	 */
823	td = curthread;
824	if (td->td_proc == p) {
825		u = cpu_ticks();
826		runtime = u - PCPU_GET(switchtime);
827		td->td_runtime += runtime;
828		td->td_incruntime += runtime;
829		PCPU_SET(switchtime, u);
830	}
831	/* Make sure the per-thread stats are current. */
832	FOREACH_THREAD_IN_PROC(p, td) {
833		if (td->td_incruntime == 0)
834			continue;
835		ruxagg(p, td);
836	}
837	calcru1(p, &p->p_rux, up, sp);
838}
839
840/* Collect resource usage for a single thread. */
841void
842rufetchtd(struct thread *td, struct rusage *ru)
843{
844	struct proc *p;
845	uint64_t runtime, u;
846
847	p = td->td_proc;
848	PROC_STATLOCK_ASSERT(p, MA_OWNED);
849	THREAD_LOCK_ASSERT(td, MA_OWNED);
850	/*
851	 * If we are getting stats for the current thread, then add in the
852	 * stats that this thread has accumulated in its current time slice.
853	 * We reset the thread and CPU state as if we had performed a context
854	 * switch right here.
855	 */
856	if (td == curthread) {
857		u = cpu_ticks();
858		runtime = u - PCPU_GET(switchtime);
859		td->td_runtime += runtime;
860		td->td_incruntime += runtime;
861		PCPU_SET(switchtime, u);
862	}
863	ruxagg(p, td);
864	*ru = td->td_ru;
865	calcru1(p, &td->td_rux, &ru->ru_utime, &ru->ru_stime);
866}
867
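/*
 * Convert the accumulated runtime and user/system/interrupt tick counts
 * in *ruxp into user and system timevals, enforcing monotonicity of the
 * reported times.
 */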
868static void
869calcru1(struct proc *p, struct rusage_ext *ruxp, struct timeval *up,
870    struct timeval *sp)
871{
872	/* {user, system, interrupt, total} {ticks, usec}: */
873	uint64_t ut, uu, st, su, it, tt, tu;
874
875	ut = ruxp->rux_uticks;
876	st = ruxp->rux_sticks;
877	it = ruxp->rux_iticks;
878	tt = ut + st + it;
879	if (tt == 0) {
880		/* Avoid divide by zero */
881		st = 1;
882		tt = 1;
883	}
884	tu = cputick2usec(ruxp->rux_runtime);
885	if ((int64_t)tu < 0) {
886		/* XXX: this should be an assert /phk */
887		printf("calcru: negative runtime of %jd usec for pid %d (%s)\n",
888		    (intmax_t)tu, p->p_pid, p->p_comm);
889		tu = ruxp->rux_tu;
890	}
891
892	if (tu >= ruxp->rux_tu) {
893		/*
894		 * The normal case, time increased.
895		 * Enforce monotonicity of bucketed numbers.
896		 */
897		uu = (tu * ut) / tt;
898		if (uu < ruxp->rux_uu)
899			uu = ruxp->rux_uu;
900		su = (tu * st) / tt;
901		if (su < ruxp->rux_su)
902			su = ruxp->rux_su;
903	} else if (tu + 3 > ruxp->rux_tu || 101 * tu > 100 * ruxp->rux_tu) {
904		/*
905		 * When we calibrate the cputicker, it is not uncommon to
906		 * see the presumably fixed frequency increase slightly over
907		 * time as a result of thermal stabilization and NTP
908		 * discipline (of the reference clock).  We therefore ignore
909		 * a bit of backwards slop because we expect to catch up
910		 * shortly.  We use a 3 microsecond limit to catch low
911		 * counts and a 1% limit for high counts.
912		 */
913		uu = ruxp->rux_uu;
914		su = ruxp->rux_su;
915		tu = ruxp->rux_tu;
916	} else { /* tu < ruxp->rux_tu */
917		/*
918		 * What happened here was likely that a laptop, which ran at
919		 * a reduced clock frequency at boot, kicked into high gear.
920		 * The wisdom of spamming this message in that case is
921		 * dubious, but it might also be indicative of something
922		 * serious, so let's keep it and hope laptops can be made
923		 * more truthful about their CPU speed via ACPI.
924		 */
925		printf("calcru: runtime went backwards from %ju usec "
926		    "to %ju usec for pid %d (%s)\n",
927		    (uintmax_t)ruxp->rux_tu, (uintmax_t)tu,
928		    p->p_pid, p->p_comm);
929		uu = (tu * ut) / tt;
930		su = (tu * st) / tt;
931	}
932
933	ruxp->rux_uu = uu;
934	ruxp->rux_su = su;
935	ruxp->rux_tu = tu;
936
937	up->tv_sec = uu / 1000000;
938	up->tv_usec = uu % 1000000;
939	sp->tv_sec = su / 1000000;
940	sp->tv_usec = su % 1000000;
941}
942
943#ifndef _SYS_SYSPROTO_H_
944struct getrusage_args {
945	int	who;
946	struct	rusage *rusage;
947};
948#endif
949int
950sys_getrusage(struct thread *td, struct getrusage_args *uap)
951{
952	struct rusage ru;
953	int error;
954
955	error = kern_getrusage(td, uap->who, &ru);
956	if (error == 0)
957		error = copyout(&ru, uap->rusage, sizeof(struct rusage));
958	return (error);
959}
960
961int
962kern_getrusage(struct thread *td, int who, struct rusage *rup)
963{
964	struct proc *p;
965	int error;
966
967	error = 0;
968	p = td->td_proc;
969	PROC_LOCK(p);
970	switch (who) {
971	case RUSAGE_SELF:
972		rufetchcalc(p, rup, &rup->ru_utime,
973		    &rup->ru_stime);
974		break;
975
976	case RUSAGE_CHILDREN:
977		*rup = p->p_stats->p_cru;
978		calccru(p, &rup->ru_utime, &rup->ru_stime);
979		break;
980
981	case RUSAGE_THREAD:
982		PROC_STATLOCK(p);
983		thread_lock(td);
984		rufetchtd(td, rup);
985		thread_unlock(td);
986		PROC_STATUNLOCK(p);
987		break;
988
989	default:
990		error = EINVAL;
991	}
992	PROC_UNLOCK(p);
993	return (error);
994}
995
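/*
 * Add the resource usage counters of ru2 into ru, keeping the larger of
 * the two maximum RSS values.
 */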
996void
997rucollect(struct rusage *ru, struct rusage *ru2)
998{
999	long *ip, *ip2;
1000	int i;
1001
1002	if (ru->ru_maxrss < ru2->ru_maxrss)
1003		ru->ru_maxrss = ru2->ru_maxrss;
1004	ip = &ru->ru_first;
1005	ip2 = &ru2->ru_first;
1006	for (i = &ru->ru_last - &ru->ru_first; i >= 0; i--)
1007		*ip++ += *ip2++;
1008}
1009
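/*
 * Aggregate both the rusage counters and the rusage_ext runtime and tick
 * totals of (ru2, rux2) into (ru, rux).
 */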
1010void
1011ruadd(struct rusage *ru, struct rusage_ext *rux, struct rusage *ru2,
1012    struct rusage_ext *rux2)
1013{
1014
1015	rux->rux_runtime += rux2->rux_runtime;
1016	rux->rux_uticks += rux2->rux_uticks;
1017	rux->rux_sticks += rux2->rux_sticks;
1018	rux->rux_iticks += rux2->rux_iticks;
1019	rux->rux_uu += rux2->rux_uu;
1020	rux->rux_su += rux2->rux_su;
1021	rux->rux_tu += rux2->rux_tu;
1022	rucollect(ru, ru2);
1023}
1024
1025/*
1026 * Aggregate tick counts into the proc's rusage_ext.
1027 */
1028static void
1029ruxagg_locked(struct rusage_ext *rux, struct thread *td)
1030{
1031
1032	THREAD_LOCK_ASSERT(td, MA_OWNED);
1033	PROC_STATLOCK_ASSERT(td->td_proc, MA_OWNED);
1034	rux->rux_runtime += td->td_incruntime;
1035	rux->rux_uticks += td->td_uticks;
1036	rux->rux_sticks += td->td_sticks;
1037	rux->rux_iticks += td->td_iticks;
1038}
1039
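/*
 * Aggregate a thread's runtime and tick counts into both the process and
 * per-thread rusage_ext structures, then reset the thread's counters.
 */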
1040void
1041ruxagg(struct proc *p, struct thread *td)
1042{
1043
1044	thread_lock(td);
1045	ruxagg_locked(&p->p_rux, td);
1046	ruxagg_locked(&td->td_rux, td);
1047	td->td_incruntime = 0;
1048	td->td_uticks = 0;
1049	td->td_iticks = 0;
1050	td->td_sticks = 0;
1051	thread_unlock(td);
1052}
1053
1054/*
1055 * Update the rusage_ext structure and fetch a valid aggregate rusage
1056 * for proc p if storage for one is supplied.
1057 */
1058void
1059rufetch(struct proc *p, struct rusage *ru)
1060{
1061	struct thread *td;
1062
1063	PROC_STATLOCK_ASSERT(p, MA_OWNED);
1064
1065	*ru = p->p_ru;
1066	if (p->p_numthreads > 0)  {
1067		FOREACH_THREAD_IN_PROC(p, td) {
1068			ruxagg(p, td);
1069			rucollect(ru, &td->td_ru);
1070		}
1071	}
1072}
1073
1074/*
1075 * Atomically perform a rufetch and a calcru together.
1076 * Consumers can safely assume that calcru is executed only
1077 * after rufetch has completed.
1078 */
1079void
1080rufetchcalc(struct proc *p, struct rusage *ru, struct timeval *up,
1081    struct timeval *sp)
1082{
1083
1084	PROC_STATLOCK(p);
1085	rufetch(p, ru);
1086	calcru(p, up, sp);
1087	PROC_STATUNLOCK(p);
1088}
1089
1090/*
1091 * Allocate a new resource limits structure and initialize its
1092 * reference count.
1093 */
1094struct plimit *
1095lim_alloc()
1096{
1097	struct plimit *limp;
1098
1099	limp = malloc(sizeof(struct plimit), M_PLIMIT, M_WAITOK);
1100	refcount_init(&limp->pl_refcnt, 1);
1101	return (limp);
1102}
1103
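/*
 * Acquire an additional reference on a plimit structure.
 */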
1104struct plimit *
1105lim_hold(struct plimit *limp)
1106{
1107
1108	refcount_acquire(&limp->pl_refcnt);
1109	return (limp);
1110}
1111
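/*
 * At fork time, let the child share the parent's resource limits and arm
 * the CPU-limit callout if a CPU limit is in effect.
 */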
1112void
1113lim_fork(struct proc *p1, struct proc *p2)
1114{
1115
1116	PROC_LOCK_ASSERT(p1, MA_OWNED);
1117	PROC_LOCK_ASSERT(p2, MA_OWNED);
1118
1119	p2->p_limit = lim_hold(p1->p_limit);
1120	callout_init_mtx(&p2->p_limco, &p2->p_mtx, 0);
1121	if (p1->p_cpulimit != RLIM_INFINITY)
1122		callout_reset_sbt(&p2->p_limco, SBT_1S, 0,
1123		    lim_cb, p2, C_PREL(1));
1124}
1125
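/*
 * Release a reference on a plimit structure, freeing it when the last
 * reference is dropped.
 */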
1126void
1127lim_free(struct plimit *limp)
1128{
1129
1130	if (refcount_release(&limp->pl_refcnt))
1131		free((void *)limp, M_PLIMIT);
1132}
1133
1134/*
1135 * Make a copy of the plimit structure.
1136 * We share these structures copy-on-write after fork.
1137 */
1138void
1139lim_copy(struct plimit *dst, struct plimit *src)
1140{
1141
1142	KASSERT(dst->pl_refcnt <= 1, ("lim_copy to shared limit"));
1143	bcopy(src->pl_rlimit, dst->pl_rlimit, sizeof(src->pl_rlimit));
1144}
1145
1146/*
1147 * Return the hard limit for a particular system resource.  The
1148 * which parameter specifies the index into the rlimit array.
1149 */
1150rlim_t
1151lim_max(struct thread *td, int which)
1152{
1153	struct rlimit rl;
1154
1155	lim_rlimit(td, which, &rl);
1156	return (rl.rlim_max);
1157}
1158
1159rlim_t
1160lim_max_proc(struct proc *p, int which)
1161{
1162	struct rlimit rl;
1163
1164	lim_rlimit_proc(p, which, &rl);
1165	return (rl.rlim_max);
1166}
1167
1168/*
1169 * Return the current (soft) limit for a particular system resource.
1170 * The which parameter specifies the index into the rlimit array.
1171 */
1172rlim_t
1173lim_cur(struct thread *td, int which)
1174{
1175	struct rlimit rl;
1176
1177	lim_rlimit(td, which, &rl);
1178	return (rl.rlim_cur);
1179}
1180
1181rlim_t
1182lim_cur_proc(struct proc *p, int which)
1183{
1184	struct rlimit rl;
1185
1186	lim_rlimit_proc(p, which, &rl);
1187	return (rl.rlim_cur);
1188}
1189
1190/*
1191 * Copy the entire rlimit structure for the system limit specified
1192 * by 'which' into the rlimit structure pointed to by 'rlp'.
1193 */
1194void
1195lim_rlimit(struct thread *td, int which, struct rlimit *rlp)
1196{
1197	struct proc *p = td->td_proc;
1198
1199	MPASS(td == curthread);
1200	KASSERT(which >= 0 && which < RLIM_NLIMITS,
1201	    ("request for invalid resource limit"));
1202	*rlp = td->td_limit->pl_rlimit[which];
1203	if (p->p_sysent->sv_fixlimit != NULL)
1204		p->p_sysent->sv_fixlimit(rlp, which);
1205}
1206
1207void
1208lim_rlimit_proc(struct proc *p, int which, struct rlimit *rlp)
1209{
1210
1211	PROC_LOCK_ASSERT(p, MA_OWNED);
1212	KASSERT(which >= 0 && which < RLIM_NLIMITS,
1213	    ("request for invalid resource limit"));
1214	*rlp = p->p_limit->pl_rlimit[which];
1215	if (p->p_sysent->sv_fixlimit != NULL)
1216		p->p_sysent->sv_fixlimit(rlp, which);
1217}
1218
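/*
 * Initialize the uidinfo hash table and its lock.
 */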
1219void
1220uihashinit()
1221{
1222
1223	uihashtbl = hashinit(maxproc / 16, M_UIDINFO, &uihash);
1224	rw_init(&uihashtbl_lock, "uidinfo hash");
1225}
1226
1227/*
1228 * Look up the uidinfo struct for the given uid.
1229 * uihashtbl_lock must be held.
1230 * A reference is acquired on the uidinfo struct returned, if any.
1231 */
1232static struct uidinfo *
1233uilookup(uid_t uid)
1234{
1235	struct uihashhead *uipp;
1236	struct uidinfo *uip;
1237
1238	rw_assert(&uihashtbl_lock, RA_LOCKED);
1239	uipp = UIHASH(uid);
1240	LIST_FOREACH(uip, uipp, ui_hash)
1241		if (uip->ui_uid == uid) {
1242			uihold(uip);
1243			break;
1244		}
1245
1246	return (uip);
1247}
1248
1249/*
1250 * Find or allocate a struct uidinfo for a particular uid.
1251 * Returns with uidinfo struct referenced.
1252 * uifree() should be called on a struct uidinfo when released.
1253 */
1254struct uidinfo *
1255uifind(uid_t uid)
1256{
1257	struct uidinfo *new_uip, *uip;
1258
1259	rw_rlock(&uihashtbl_lock);
1260	uip = uilookup(uid);
1261	rw_runlock(&uihashtbl_lock);
1262	if (uip != NULL)
1263		return (uip);
1264
1265	new_uip = malloc(sizeof(*new_uip), M_UIDINFO, M_WAITOK | M_ZERO);
1266	racct_create(&new_uip->ui_racct);
1267	refcount_init(&new_uip->ui_ref, 1);
1268	new_uip->ui_uid = uid;
1269	mtx_init(&new_uip->ui_vmsize_mtx, "ui_vmsize", NULL, MTX_DEF);
1270
1271	rw_wlock(&uihashtbl_lock);
1272	/*
1273	 * There's a chance someone created our uidinfo while we
1274	 * were in malloc and not holding the lock, so we have to
1275	 * make sure we don't insert a duplicate uidinfo.
1276	 */
1277	if ((uip = uilookup(uid)) == NULL) {
1278		LIST_INSERT_HEAD(UIHASH(uid), new_uip, ui_hash);
1279		rw_wunlock(&uihashtbl_lock);
1280		uip = new_uip;
1281	} else {
1282		rw_wunlock(&uihashtbl_lock);
1283		racct_destroy(&new_uip->ui_racct);
1284		mtx_destroy(&new_uip->ui_vmsize_mtx);
1285		free(new_uip, M_UIDINFO);
1286	}
1287	return (uip);
1288}
1289
1290/*
1291 * Place another refcount on a uidinfo struct.
1292 */
1293void
1294uihold(struct uidinfo *uip)
1295{
1296
1297	refcount_acquire(&uip->ui_ref);
1298}
1299
1300/*-
1301 * Since uidinfo structs have a long lifetime, we use an
1302 * opportunistic refcounting scheme to avoid locking the lookup hash
1303 * for each release.
1304 *
1305 * If the refcount hits 0, we need to free the structure,
1306 * which means we need to lock the hash.
1307 * Optimal case:
1308 *   If the count is greater than 1, lower it with a lockless
1309 *   atomic compare-and-set and return without touching the hash.
1310 * Suboptimal case:
1311 *   If the count may drop to 0, take the hash write lock, release
1312 *   the reference, and if it really was the last one, remove the
1313 *   struct from the hash and free it.
1314 */
1315void
1316uifree(struct uidinfo *uip)
1317{
1318	int old;
1319
1320	/* Prepare for optimal case. */
1321	old = uip->ui_ref;
1322	if (old > 1 && atomic_cmpset_int(&uip->ui_ref, old, old - 1))
1323		return;
1324
1325	/* Prepare for suboptimal case. */
1326	rw_wlock(&uihashtbl_lock);
1327	if (refcount_release(&uip->ui_ref) == 0) {
1328		rw_wunlock(&uihashtbl_lock);
1329		return;
1330	}
1331
1332	racct_destroy(&uip->ui_racct);
1333	LIST_REMOVE(uip, ui_hash);
1334	rw_wunlock(&uihashtbl_lock);
1335
1336	if (uip->ui_sbsize != 0)
1337		printf("freeing uidinfo: uid = %d, sbsize = %ld\n",
1338		    uip->ui_uid, uip->ui_sbsize);
1339	if (uip->ui_proccnt != 0)
1340		printf("freeing uidinfo: uid = %d, proccnt = %ld\n",
1341		    uip->ui_uid, uip->ui_proccnt);
1342	if (uip->ui_vmsize != 0)
1343		printf("freeing uidinfo: uid = %d, swapuse = %lld\n",
1344		    uip->ui_uid, (unsigned long long)uip->ui_vmsize);
1345	mtx_destroy(&uip->ui_vmsize_mtx);
1346	free(uip, M_UIDINFO);
1347}
1348
1349#ifdef RACCT
1350void
1351ui_racct_foreach(void (*callback)(struct racct *racct,
1352    void *arg2, void *arg3), void (*pre)(void), void (*post)(void),
1353    void *arg2, void *arg3)
1354{
1355	struct uidinfo *uip;
1356	struct uihashhead *uih;
1357
1358	rw_rlock(&uihashtbl_lock);
1359	if (pre != NULL)
1360		(pre)();
1361	for (uih = &uihashtbl[uihash]; uih >= uihashtbl; uih--) {
1362		LIST_FOREACH(uip, uih, ui_hash) {
1363			(callback)(uip->ui_racct, arg2, arg3);
1364		}
1365	}
1366	if (post != NULL)
1367		(post)();
1368	rw_runlock(&uihashtbl_lock);
1369}
1370#endif
1371
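/*
 * Adjust a per-uid resource counter by 'diff'.  Increases that would
 * exceed a non-zero 'max' are refused.  Returns 1 on success and 0 if
 * the limit would be exceeded.
 */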
1372static inline int
1373chglimit(struct uidinfo *uip, long *limit, int diff, rlim_t max, const char *name)
1374{
1375
1376	/* Don't allow them to exceed max, but allow subtraction. */
1377	if (diff > 0 && max != 0) {
1378		if (atomic_fetchadd_long(limit, (long)diff) + diff > max) {
1379			atomic_subtract_long(limit, (long)diff);
1380			return (0);
1381		}
1382	} else {
1383		atomic_add_long(limit, (long)diff);
1384		if (*limit < 0)
1385			printf("negative %s for uid = %d\n", name, uip->ui_uid);
1386	}
1387	return (1);
1388}
1389
1390/*
1391 * Change the count of processes a given user is using.
1392 * When 'max' is 0, don't enforce a limit.
1393 */
1394int
1395chgproccnt(struct uidinfo *uip, int diff, rlim_t max)
1396{
1397
1398	return (chglimit(uip, &uip->ui_proccnt, diff, max, "proccnt"));
1399}
1400
1401/*
1402 * Change the total socket buffer size a user has used.
1403 */
1404int
1405chgsbsize(struct uidinfo *uip, u_int *hiwat, u_int to, rlim_t max)
1406{
1407	int diff, rv;
1408
1409	diff = to - *hiwat;
1410	if (diff > 0 && max == 0) {
1411		rv = 0;
1412	} else {
1413		rv = chglimit(uip, &uip->ui_sbsize, diff, max, "sbsize");
1414		if (rv != 0)
1415			*hiwat = to;
1416	}
1417	return (rv);
1418}
1419
1420/*
1421 * Change the count of pseudo-terminals a given user is using.
1422 * When 'max' is 0, don't enforce a limit.
1423 */
1424int
1425chgptscnt(struct uidinfo *uip, int diff, rlim_t max)
1426{
1427
1428	return (chglimit(uip, &uip->ui_ptscnt, diff, max, "ptscnt"));
1429}
1430
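/*
 * Change the count of kqueues a given user is using.  When 'max' is 0,
 * don't enforce a limit.
 */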
1431int
1432chgkqcnt(struct uidinfo *uip, int diff, rlim_t max)
1433{
1434
1435	return (chglimit(uip, &uip->ui_kqcnt, diff, max, "kqcnt"));
1436}
1437
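/*
 * Change the count of umtx objects a given user is using.  When 'max'
 * is 0, don't enforce a limit.
 */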
1438int
1439chgumtxcnt(struct uidinfo *uip, int diff, rlim_t max)
1440{
1441
1442	return (chglimit(uip, &uip->ui_umtxcnt, diff, max, "umtxcnt"));
1443}
1444