/*-
 * Copyright (c) 1982, 1986, 1991, 1993
 *	The Regents of the University of California.  All rights reserved.
 * (c) UNIX System Laboratories, Inc.
 * All or some portions of this file are derived from material licensed
 * to the University of California by American Telephone and Telegraph
 * Co. or Unix System Laboratories, Inc. and are reproduced herein with
 * the permission of UNIX System Laboratories, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)kern_resource.c	8.5 (Berkeley) 1/21/94
 * $FreeBSD: head/sys/kern/kern_resource.c 69022 2000-11-22 07:42:04Z jake $
 */

#include "opt_compat.h"
#include "opt_rlimit.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/sysproto.h>
#include <sys/file.h>
#include <sys/kernel.h>
#include <sys/resourcevar.h>
#include <sys/malloc.h>
#include <sys/proc.h>
#include <sys/time.h>

#include <vm/vm.h>
#include <vm/vm_param.h>
#include <sys/lock.h>
#include <vm/pmap.h>
#include <vm/vm_map.h>

static int donice __P((struct proc *curp, struct proc *chgp, int n));
/* dosetrlimit non-static:  Needed by SysVR4 emulator */
int dosetrlimit __P((struct proc *p, u_int which, struct rlimit *limp));

static MALLOC_DEFINE(M_UIDINFO, "uidinfo", "uidinfo structures");
#define	UIHASH(uid)	(&uihashtbl[(uid) & uihash])
static LIST_HEAD(uihashhead, uidinfo) *uihashtbl;
static u_long uihash;		/* size of hash table - 1 */

static struct uidinfo	*uicreate __P((uid_t uid));
static struct uidinfo	*uilookup __P((uid_t uid));

/*
 * Resource controls and accounting.
 */

#ifndef _SYS_SYSPROTO_H_
struct getpriority_args {
	int	which;
	int	who;
};
#endif
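/*
 * Return the lowest (most favourable) nice value among the processes
 * selected by "which"/"who".  Because nice values may be negative, the
 * result is handed back through p_retval[0]; the function's return
 * value carries only the error (ESRCH if no matching process was
 * visible to the caller).
 */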
int
getpriority(curp, uap)
	struct proc *curp;
	register struct getpriority_args *uap;
{
	register struct proc *p;
	register int low = PRIO_MAX + 1;

	switch (uap->which) {

	case PRIO_PROCESS:
		if (uap->who == 0)
			p = curp;
		else
			p = pfind(uap->who);
		if (p == 0)
			break;
		if (p_can(curp, p, P_CAN_SEE, NULL))
			break;
		low = p->p_nice;
		break;

	case PRIO_PGRP: {
		register struct pgrp *pg;

		if (uap->who == 0)
			pg = curp->p_pgrp;
		else if ((pg = pgfind(uap->who)) == NULL)
			break;
		LIST_FOREACH(p, &pg->pg_members, p_pglist) {
			if (!p_can(curp, p, P_CAN_SEE, NULL) && p->p_nice < low)
				low = p->p_nice;
		}
		break;
	}

	case PRIO_USER:
		if (uap->who == 0)
			uap->who = curp->p_ucred->cr_uid;
		lockmgr(&allproc_lock, LK_SHARED, NULL, CURPROC);
		LIST_FOREACH(p, &allproc, p_list)
			if (!p_can(curp, p, P_CAN_SEE, NULL) &&
			    p->p_ucred->cr_uid == uap->who &&
			    p->p_nice < low)
				low = p->p_nice;
		lockmgr(&allproc_lock, LK_RELEASE, NULL, CURPROC);
		break;

	default:
		return (EINVAL);
	}
	if (low == PRIO_MAX + 1)
		return (ESRCH);
	curp->p_retval[0] = low;
	return (0);
}

#ifndef _SYS_SYSPROTO_H_
struct setpriority_args {
	int	which;
	int	who;
	int	prio;
};
#endif
/* ARGSUSED */
int
setpriority(curp, uap)
	struct proc *curp;
	register struct setpriority_args *uap;
{
	register struct proc *p;
	int found = 0, error = 0;

	switch (uap->which) {

	case PRIO_PROCESS:
		if (uap->who == 0)
			p = curp;
		else
			p = pfind(uap->who);
		if (p == 0)
			break;
		if (p_can(curp, p, P_CAN_SEE, NULL))
			break;
		error = donice(curp, p, uap->prio);
		found++;
		break;

	case PRIO_PGRP: {
		register struct pgrp *pg;

		if (uap->who == 0)
			pg = curp->p_pgrp;
		else if ((pg = pgfind(uap->who)) == NULL)
			break;
		LIST_FOREACH(p, &pg->pg_members, p_pglist) {
			if (!p_can(curp, p, P_CAN_SEE, NULL)) {
				error = donice(curp, p, uap->prio);
				found++;
			}
		}
		break;
	}

	case PRIO_USER:
		if (uap->who == 0)
			uap->who = curp->p_ucred->cr_uid;
		lockmgr(&allproc_lock, LK_SHARED, NULL, CURPROC);
		LIST_FOREACH(p, &allproc, p_list)
			if (p->p_ucred->cr_uid == uap->who &&
			    !p_can(curp, p, P_CAN_SEE, NULL)) {
				error = donice(curp, p, uap->prio);
				found++;
			}
		lockmgr(&allproc_lock, LK_RELEASE, NULL, CURPROC);
		break;

	default:
		return (EINVAL);
	}
	if (found == 0)
		return (ESRCH);
	return (error);
}

static int
donice(curp, chgp, n)
	register struct proc *curp, *chgp;
	register int n;
{
	int	error;

	if ((error = p_can(curp, chgp, P_CAN_SCHED, NULL)))
		return (error);
	if (n > PRIO_MAX)
		n = PRIO_MAX;
	if (n < PRIO_MIN)
		n = PRIO_MIN;
	if (n < chgp->p_nice && suser(curp))
		return (EACCES);
	chgp->p_nice = n;
	(void)resetpriority(chgp);
	return (0);
}

/* rtprio system call */
#ifndef _SYS_SYSPROTO_H_
struct rtprio_args {
	int		function;
	pid_t		pid;
	struct rtprio	*rtp;
};
#endif

/*
 * Set realtime priority
 */
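/*
 * Illustrative sketch (not taken from this file): from userland a
 * process could ask to run itself at idle priority with something like
 *
 *	struct rtprio rtp = { RTP_PRIO_IDLE, RTP_PRIO_MAX };
 *	rtprio(RTP_SET, 0, &rtp);
 *
 * where pid 0 means "the calling process", matching the pfind() logic
 * below.  The type/prio field order is assumed from the uses of
 * rtp.type and rtp.prio in this function.
 */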

/* ARGSUSED */
int
rtprio(curp, uap)
	struct proc *curp;
	register struct rtprio_args *uap;
{
	register struct proc *p;
	struct rtprio rtp;
	int error;

	error = copyin(uap->rtp, &rtp, sizeof(struct rtprio));
	if (error)
		return (error);

	if (uap->pid == 0)
		p = curp;
	else
		p = pfind(uap->pid);

	if (p == 0)
		return (ESRCH);

	switch (uap->function) {
	case RTP_LOOKUP:
		return (copyout(&p->p_rtprio, uap->rtp, sizeof(struct rtprio)));
	case RTP_SET:
		if ((error = p_can(curp, p, P_CAN_SCHED, NULL)))
			return (error);
		/* disallow setting rtprio in most cases if not superuser */
		if (suser(curp) != 0) {
			/* can't set someone else's */
			if (uap->pid)
				return (EPERM);
			/* can't set realtime priority */
/*
 * Realtime priority has to be restricted for reasons which should be
 * obvious. However, for idle priority, there is a potential for
 * system deadlock if an idleprio process gains a lock on a resource
 * that other processes need (and the idleprio process can't run
 * due to a CPU-bound normal process). Fix me! XXX
 */
#if 0
			if (RTP_PRIO_IS_REALTIME(rtp.type))
#endif
			if (rtp.type != RTP_PRIO_NORMAL)
				return (EPERM);
		}
		switch (rtp.type) {
#ifdef RTP_PRIO_FIFO
		case RTP_PRIO_FIFO:
#endif
		case RTP_PRIO_REALTIME:
		case RTP_PRIO_NORMAL:
		case RTP_PRIO_IDLE:
			if (rtp.prio > RTP_PRIO_MAX)
				return (EINVAL);
			p->p_rtprio = rtp;
			return (0);
		default:
			return (EINVAL);
		}

	default:
		return (EINVAL);
	}
}

#if defined(COMPAT_43) || defined(COMPAT_SUNOS)
#ifndef _SYS_SYSPROTO_H_
struct osetrlimit_args {
	u_int	which;
	struct	orlimit *rlp;
};
#endif
/* ARGSUSED */
int
osetrlimit(p, uap)
	struct proc *p;
	register struct osetrlimit_args *uap;
{
	struct orlimit olim;
	struct rlimit lim;
	int error;

	if ((error =
	    copyin((caddr_t)uap->rlp, (caddr_t)&olim, sizeof(struct orlimit))))
		return (error);
	lim.rlim_cur = olim.rlim_cur;
	lim.rlim_max = olim.rlim_max;
	return (dosetrlimit(p, uap->which, &lim));
}

#ifndef _SYS_SYSPROTO_H_
struct ogetrlimit_args {
	u_int	which;
	struct	orlimit *rlp;
};
#endif
/* ARGSUSED */
int
ogetrlimit(p, uap)
	struct proc *p;
	register struct ogetrlimit_args *uap;
{
	struct orlimit olim;

	if (uap->which >= RLIM_NLIMITS)
		return (EINVAL);
	olim.rlim_cur = p->p_rlimit[uap->which].rlim_cur;
	if (olim.rlim_cur == -1)
		olim.rlim_cur = 0x7fffffff;
	olim.rlim_max = p->p_rlimit[uap->which].rlim_max;
	if (olim.rlim_max == -1)
		olim.rlim_max = 0x7fffffff;
	return (copyout((caddr_t)&olim, (caddr_t)uap->rlp, sizeof(olim)));
}
#endif /* COMPAT_43 || COMPAT_SUNOS */

#ifndef _SYS_SYSPROTO_H_
struct __setrlimit_args {
	u_int	which;
	struct	rlimit *rlp;
};
#endif
/* ARGSUSED */
int
setrlimit(p, uap)
	struct proc *p;
	register struct __setrlimit_args *uap;
{
	struct rlimit alim;
	int error;

	if ((error =
	    copyin((caddr_t)uap->rlp, (caddr_t)&alim, sizeof (struct rlimit))))
		return (error);
	return (dosetrlimit(p, uap->which, &alim));
}

int
dosetrlimit(p, which, limp)
	struct proc *p;
	u_int which;
	struct rlimit *limp;
{
	register struct rlimit *alimp;
	int error;

	if (which >= RLIM_NLIMITS)
		return (EINVAL);
	alimp = &p->p_rlimit[which];

	/*
	 * Preserve historical bugs by treating negative limits as unsigned.
	 */
	if (limp->rlim_cur < 0)
		limp->rlim_cur = RLIM_INFINITY;
	if (limp->rlim_max < 0)
		limp->rlim_max = RLIM_INFINITY;

	if (limp->rlim_cur > alimp->rlim_max ||
	    limp->rlim_max > alimp->rlim_max)
		if ((error = suser_xxx(0, p, PRISON_ROOT)))
			return (error);
	if (limp->rlim_cur > limp->rlim_max)
		limp->rlim_cur = limp->rlim_max;
	if (p->p_limit->p_refcnt > 1 &&
	    (p->p_limit->p_lflags & PL_SHAREMOD) == 0) {
		p->p_limit->p_refcnt--;
		p->p_limit = limcopy(p->p_limit);
		alimp = &p->p_rlimit[which];
	}

	switch (which) {

	case RLIMIT_CPU:
		if (limp->rlim_cur > RLIM_INFINITY / (rlim_t)1000000)
			p->p_limit->p_cpulimit = RLIM_INFINITY;
		else
			p->p_limit->p_cpulimit =
			    (rlim_t)1000000 * limp->rlim_cur;
		break;
	case RLIMIT_DATA:
		if (limp->rlim_cur > MAXDSIZ)
			limp->rlim_cur = MAXDSIZ;
		if (limp->rlim_max > MAXDSIZ)
			limp->rlim_max = MAXDSIZ;
		break;

	case RLIMIT_STACK:
		if (limp->rlim_cur > MAXSSIZ)
			limp->rlim_cur = MAXSSIZ;
		if (limp->rlim_max > MAXSSIZ)
			limp->rlim_max = MAXSSIZ;
		/*
		 * The stack is allocated to its maximum size at exec time,
		 * with only "rlim_cur" bytes made accessible.  If the limit
		 * is being raised, make more of the stack accessible; if it
		 * is being lowered, make the excess inaccessible.
		 */
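		/*
		 * Worked example (illustrative numbers only): raising
		 * rlim_cur from 1MB to 8MB re-enables VM_PROT_ALL on the
		 * 7MB range that ends 1MB below USRSTACK; lowering it does
		 * the reverse with VM_PROT_NONE.  The address and size are
		 * truncated/rounded to page boundaries before the
		 * vm_map_protect() call below.
		 */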
		if (limp->rlim_cur != alimp->rlim_cur) {
			vm_offset_t addr;
			vm_size_t size;
			vm_prot_t prot;

			if (limp->rlim_cur > alimp->rlim_cur) {
				prot = VM_PROT_ALL;
				size = limp->rlim_cur - alimp->rlim_cur;
				addr = USRSTACK - limp->rlim_cur;
			} else {
				prot = VM_PROT_NONE;
				size = alimp->rlim_cur - limp->rlim_cur;
				addr = USRSTACK - alimp->rlim_cur;
			}
			addr = trunc_page(addr);
			size = round_page(size);
			(void) vm_map_protect(&p->p_vmspace->vm_map,
					      addr, addr+size, prot, FALSE);
		}
		break;

	case RLIMIT_NOFILE:
		if (limp->rlim_cur > maxfilesperproc)
			limp->rlim_cur = maxfilesperproc;
		if (limp->rlim_max > maxfilesperproc)
			limp->rlim_max = maxfilesperproc;
		break;

	case RLIMIT_NPROC:
		if (limp->rlim_cur > maxprocperuid)
			limp->rlim_cur = maxprocperuid;
		if (limp->rlim_max > maxprocperuid)
			limp->rlim_max = maxprocperuid;
		break;
	}
	*alimp = *limp;
	return (0);
}

#ifndef _SYS_SYSPROTO_H_
struct __getrlimit_args {
	u_int	which;
	struct	rlimit *rlp;
};
#endif
/* ARGSUSED */
int
getrlimit(p, uap)
	struct proc *p;
	register struct __getrlimit_args *uap;
{

	if (uap->which >= RLIM_NLIMITS)
		return (EINVAL);
	return (copyout((caddr_t)&p->p_rlimit[uap->which], (caddr_t)uap->rlp,
	    sizeof (struct rlimit)));
}

/*
 * Transform the running time and tick information in proc p into user,
 * system, and interrupt time usage.
 */
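/*
 * Illustrative example (numbers invented): if the total run time works
 * out to tu = 30000000 usec and the tick counts are ut = 2, st = 1,
 * it = 0 (so tt = 3), the split below yields uu = 20000000,
 * su = 10000000 and iu = 0 usec.  The statclock ticks only set the
 * proportions; the total is taken from the measured run time.
 */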
void
calcru(p, up, sp, ip)
	struct proc *p;
	struct timeval *up;
	struct timeval *sp;
	struct timeval *ip;
{
	/* {user, system, interrupt, total} {ticks, usec}; previous tu: */
	u_int64_t ut, uu, st, su, it, iu, tt, tu, ptu;
	int s;
	struct timeval tv;

	/* XXX: why spl-protect ?  worst case is an off-by-one report */
	s = splstatclock();
	ut = p->p_uticks;
	st = p->p_sticks;
	it = p->p_iticks;
	splx(s);

	tt = ut + st + it;
	if (tt == 0) {
		st = 1;
		tt = 1;
	}

	tu = p->p_runtime;
	if (p == curproc) {
		/*
		 * Adjust for the current time slice.  This is actually fairly
		 * important since the error here is on the order of a time
		 * quantum, which is much greater than the sampling error.
		 */
		microuptime(&tv);
		if (timevalcmp(&tv, &switchtime, <))
			printf("microuptime() went backwards (%ld.%06ld -> %ld.%06ld)\n",
			    switchtime.tv_sec, switchtime.tv_usec,
			    tv.tv_sec, tv.tv_usec);
		else
			tu += (tv.tv_usec - switchtime.tv_usec) +
			    (tv.tv_sec - switchtime.tv_sec) * (int64_t)1000000;
	}
	ptu = p->p_uu + p->p_su + p->p_iu;
	if (tu < ptu || (int64_t)tu < 0) {
		/* XXX no %qd in kernel.  Truncate. */
		printf("calcru: negative time of %ld usec for pid %d (%s)\n",
		       (long)tu, p->p_pid, p->p_comm);
		tu = ptu;
	}

	/* Subdivide tu. */
	uu = (tu * ut) / tt;
	su = (tu * st) / tt;
	iu = tu - uu - su;

	/*
	 * Enforce monotonicity.  Earlier calls divided a smaller total
	 * among possibly different tick ratios, so the fresh uu/su/iu
	 * values can come out below what was already reported; never let
	 * the per-category times go backwards.
	 */
	if (uu < p->p_uu || su < p->p_su || iu < p->p_iu) {
		if (uu < p->p_uu)
			uu = p->p_uu;
		else if (uu + p->p_su + p->p_iu > tu)
			uu = tu - p->p_su - p->p_iu;
		if (st == 0)
			su = p->p_su;
		else {
			su = ((tu - uu) * st) / (st + it);
			if (su < p->p_su)
				su = p->p_su;
			else if (uu + su + p->p_iu > tu)
				su = tu - uu - p->p_iu;
		}
		KASSERT(uu + su + p->p_iu <= tu,
		    ("calcru: monotonisation botch 1"));
		iu = tu - uu - su;
		KASSERT(iu >= p->p_iu,
		    ("calcru: monotonisation botch 2"));
	}
	p->p_uu = uu;
	p->p_su = su;
	p->p_iu = iu;

	up->tv_sec = uu / 1000000;
	up->tv_usec = uu % 1000000;
	sp->tv_sec = su / 1000000;
	sp->tv_usec = su % 1000000;
	if (ip != NULL) {
		ip->tv_sec = iu / 1000000;
		ip->tv_usec = iu % 1000000;
	}
}

#ifndef _SYS_SYSPROTO_H_
struct getrusage_args {
	int	who;
	struct	rusage *rusage;
};
#endif
/* ARGSUSED */
int
getrusage(p, uap)
	register struct proc *p;
	register struct getrusage_args *uap;
{
	register struct rusage *rup;

	switch (uap->who) {

	case RUSAGE_SELF:
		rup = &p->p_stats->p_ru;
		calcru(p, &rup->ru_utime, &rup->ru_stime, NULL);
		break;

	case RUSAGE_CHILDREN:
		rup = &p->p_stats->p_cru;
		break;

	default:
		return (EINVAL);
	}
	return (copyout((caddr_t)rup, (caddr_t)uap->rusage,
	    sizeof (struct rusage)));
}

void
ruadd(ru, ru2)
	register struct rusage *ru, *ru2;
{
	register long *ip, *ip2;
	register int i;

	timevaladd(&ru->ru_utime, &ru2->ru_utime);
	timevaladd(&ru->ru_stime, &ru2->ru_stime);
	if (ru->ru_maxrss < ru2->ru_maxrss)
		ru->ru_maxrss = ru2->ru_maxrss;
	ip = &ru->ru_first; ip2 = &ru2->ru_first;
	for (i = &ru->ru_last - &ru->ru_first; i >= 0; i--)
		*ip++ += *ip2++;
}

/*
 * Make a copy of the plimit structure.
 * We share these structures copy-on-write after fork,
 * and copy when a limit is changed.
 */
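/*
 * Sketch of the sharing protocol, as visible in dosetrlimit() above:
 * after fork the child points at the parent's plimit with the
 * reference count bumped; the first limit change by either process
 * then sees p_refcnt > 1 without PL_SHAREMOD, drops its reference and
 * works on a private copy obtained from limcopy() below.
 */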
struct plimit *
limcopy(lim)
	struct plimit *lim;
{
	register struct plimit *copy;

	MALLOC(copy, struct plimit *, sizeof(struct plimit),
	    M_SUBPROC, M_WAITOK);
	bcopy(lim->pl_rlimit, copy->pl_rlimit, sizeof(struct plimit));
	copy->p_lflags = 0;
	copy->p_refcnt = 1;
	return (copy);
}

/*
 * Find the uidinfo structure for a uid.  This structure is used to
 * track the total resource consumption (process count, socket buffer
 * size, etc.) for the uid and impose limits.
 */
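/*
 * Rough usage sketch (the callers live elsewhere in the kernel): a
 * holder obtains a reference with uifind(uid), charges resources
 * through chgproccnt()/chgsbsize() below, and drops the reference with
 * uifree(), which tears the entry down once the count reaches zero.
 * Lookups hash the uid into uihashtbl via the UIHASH() macro above.
 */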
void
uihashinit()
{
	uihashtbl = hashinit(maxproc / 16, M_UIDINFO, &uihash);
}

static struct uidinfo *
uilookup(uid)
	uid_t uid;
{
	struct	uihashhead *uipp;
	struct	uidinfo *uip;

	uipp = UIHASH(uid);
	LIST_FOREACH(uip, uipp, ui_hash)
		if (uip->ui_uid == uid)
			break;

	return (uip);
}

static struct uidinfo *
uicreate(uid)
	uid_t uid;
{
	struct	uidinfo *uip, *norace;

	MALLOC(uip, struct uidinfo *, sizeof(*uip), M_UIDINFO, M_NOWAIT);
	if (uip == NULL) {
		MALLOC(uip, struct uidinfo *, sizeof(*uip), M_UIDINFO, M_WAITOK);
		/*
		 * Since M_WAITOK may have slept, look up the uid again
		 * afterwards, or we risk inserting a redundant entry.
		 */
		norace = uilookup(uid);
		if (norace != NULL) {
			FREE(uip, M_UIDINFO);
			return (norace);
		}
	}
	LIST_INSERT_HEAD(UIHASH(uid), uip, ui_hash);
	uip->ui_uid = uid;
	uip->ui_proccnt = 0;
	uip->ui_sbsize = 0;
	uip->ui_ref = 0;
	return (uip);
}

struct uidinfo *
uifind(uid)
	uid_t uid;
{
	struct	uidinfo *uip;

	uip = uilookup(uid);
	if (uip == NULL)
		uip = uicreate(uid);
	uip->ui_ref++;
	return (uip);
}

int
uifree(uip)
	struct	uidinfo *uip;
{

	if (--uip->ui_ref == 0) {
		if (uip->ui_sbsize != 0)
			/* XXX no %qd in kernel.  Truncate. */
			printf("freeing uidinfo: uid = %d, sbsize = %ld\n",
			    uip->ui_uid, (long)uip->ui_sbsize);
		if (uip->ui_proccnt != 0)
			printf("freeing uidinfo: uid = %d, proccnt = %ld\n",
			    uip->ui_uid, uip->ui_proccnt);
		LIST_REMOVE(uip, ui_hash);
		FREE(uip, M_UIDINFO);
		return (1);
	}
	return (0);
}

/*
 * Change the count of processes a given user is running.
 * When 'max' is 0, don't enforce a limit.
 */
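/*
 * Illustrative only (the real call sites are outside this file): a
 * caller on the process-creation path is expected to do something like
 *
 *	if (!chgproccnt(uip, 1, maxprocperuid))
 *		... fail the fork ...
 *
 * and to call chgproccnt(uip, -1, 0) when the process goes away; a
 * return of 0 means the increment would exceed "max".
 */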
int
chgproccnt(uip, diff, max)
	struct	uidinfo	*uip;
	int	diff;
	int	max;
{
	/* don't allow them to exceed max, but allow subtraction */
	if (diff > 0 && uip->ui_proccnt + diff > max && max != 0)
		return (0);
	uip->ui_proccnt += diff;
	if (uip->ui_proccnt < 0)
		printf("negative proccnt for uid = %d\n", uip->ui_uid);
	return (1);
}

/*
 * Change the total socket buffer size a user has used.
 */
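/*
 * Illustrative only: a typical caller adjusting a socket buffer
 * high-water mark would do something like
 *
 *	if (!chgsbsize(uip, &sb->sb_hiwat, newhiwat,
 *	    p->p_rlimit[RLIMIT_SBSIZE].rlim_cur))
 *		return (0);
 *
 * The function charges the difference between the new and old hiwat
 * against the per-uid total, refuses growth past "max", and always
 * allows shrinking.  The caller shown above is a sketch, not a call
 * site from this file.
 */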
int
chgsbsize(uip, hiwat, to, max)
	struct	uidinfo	*uip;
	u_long *hiwat;
	u_long	to;
	rlim_t	max;
{
	rlim_t new;
	int s;

	s = splnet();
	new = uip->ui_sbsize + to - *hiwat;
	/* don't allow them to exceed max, but allow subtraction */
	if (to > *hiwat && new > max) {
		splx(s);
		return (0);
	}
	uip->ui_sbsize = new;
	*hiwat = to;
	if (uip->ui_sbsize < 0)
		printf("negative sbsize for uid = %d\n", uip->ui_uid);
	splx(s);
	return (1);
}