kern_resource.c (96886) → kern_resource.c (99012)
1/*-
2 * Copyright (c) 1982, 1986, 1991, 1993
3 * The Regents of the University of California. All rights reserved.
4 * (c) UNIX System Laboratories, Inc.
5 * All or some portions of this file are derived from material licensed
6 * to the University of California by American Telephone and Telegraph
7 * Co. or Unix System Laboratories, Inc. and are reproduced herein with
8 * the permission of UNIX System Laboratories, Inc.
9 *
10 * Redistribution and use in source and binary forms, with or without
11 * modification, are permitted provided that the following conditions
12 * are met:
13 * 1. Redistributions of source code must retain the above copyright
14 * notice, this list of conditions and the following disclaimer.
15 * 2. Redistributions in binary form must reproduce the above copyright
16 * notice, this list of conditions and the following disclaimer in the
17 * documentation and/or other materials provided with the distribution.
18 * 3. All advertising materials mentioning features or use of this software
19 * must display the following acknowledgement:
20 * This product includes software developed by the University of
21 * California, Berkeley and its contributors.
22 * 4. Neither the name of the University nor the names of its contributors
23 * may be used to endorse or promote products derived from this software
24 * without specific prior written permission.
25 *
26 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
27 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
28 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
29 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
30 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
31 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
32 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
33 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
34 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
35 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
36 * SUCH DAMAGE.
37 *
38 * @(#)kern_resource.c 8.5 (Berkeley) 1/21/94
39 * $FreeBSD: head/sys/kern/kern_resource.c 96886 2002-05-19 00:14:50Z jhb $
39 * $FreeBSD: head/sys/kern/kern_resource.c 99012 2002-06-29 02:00:02Z alfred $
40 */
41
42#include "opt_compat.h"
43
44#include <sys/param.h>
45#include <sys/systm.h>
46#include <sys/sysproto.h>
47#include <sys/file.h>
48#include <sys/kernel.h>
49#include <sys/lock.h>
50#include <sys/malloc.h>
51#include <sys/mutex.h>
52#include <sys/proc.h>
53#include <sys/resourcevar.h>
54#include <sys/sx.h>
55#include <sys/time.h>
56
57#include <vm/vm.h>
58#include <vm/vm_param.h>
59#include <vm/pmap.h>
60#include <vm/vm_map.h>
61
62static int donice(struct thread *td, struct proc *chgp, int n);
63
64static MALLOC_DEFINE(M_UIDINFO, "uidinfo", "uidinfo structures");
65#define UIHASH(uid) (&uihashtbl[(uid) & uihash])
66static struct mtx uihashtbl_mtx;
67static LIST_HEAD(uihashhead, uidinfo) *uihashtbl;
68static u_long uihash; /* size of hash table - 1 */
69
70static struct uidinfo *uilookup(uid_t uid);
71
72/*
73 * Resource controls and accounting.
74 */
75
76#ifndef _SYS_SYSPROTO_H_
77struct getpriority_args {
78 int which;
79 int who;
80};
81#endif
82/*
83 * MPSAFE
84 */
85int
86getpriority(td, uap)
87 struct thread *td;
88 register struct getpriority_args *uap;
89{
90 register struct proc *p;
91 register int low = PRIO_MAX + 1;
92 int error = 0;
93
94 mtx_lock(&Giant);
95
96 switch (uap->which) {
97 case PRIO_PROCESS:
98 if (uap->who == 0)
99 low = td->td_ksegrp->kg_nice;
100 else {
101 p = pfind(uap->who);
102 if (p == NULL)
103 break;
104 if (p_cansee(td, p) == 0)
105 low = p->p_ksegrp.kg_nice /* XXXKSE */ ;
106 PROC_UNLOCK(p);
107 }
108 break;
109
110 case PRIO_PGRP: {
111 register struct pgrp *pg;
112
113 sx_slock(&proctree_lock);
114 if (uap->who == 0) {
115 pg = td->td_proc->p_pgrp;
116 PGRP_LOCK(pg);
117 } else {
118 pg = pgfind(uap->who);
119 if (pg == NULL) {
120 sx_sunlock(&proctree_lock);
121 break;
122 }
123 }
124 sx_sunlock(&proctree_lock);
125 LIST_FOREACH(p, &pg->pg_members, p_pglist) {
126 PROC_LOCK(p);
127 if (!p_cansee(td, p) && p->p_ksegrp.kg_nice /* XXXKSE */ < low)
128 low = p->p_ksegrp.kg_nice /* XXXKSE */ ;
129 PROC_UNLOCK(p);
130 }
131 PGRP_UNLOCK(pg);
132 break;
133 }
134
135 case PRIO_USER:
136 if (uap->who == 0)
137 uap->who = td->td_ucred->cr_uid;
138 sx_slock(&allproc_lock);
139 LIST_FOREACH(p, &allproc, p_list) {
140 PROC_LOCK(p);
141 if (!p_cansee(td, p) &&
142 p->p_ucred->cr_uid == uap->who &&
143 p->p_ksegrp.kg_nice /* XXXKSE */ < low)
144 low = p->p_ksegrp.kg_nice /* XXXKSE */ ;
145 PROC_UNLOCK(p);
146 }
147 sx_sunlock(&allproc_lock);
148 break;
149
150 default:
151 error = EINVAL;
152 break;
153 }
154 if (low == PRIO_MAX + 1 && error == 0)
155 error = ESRCH;
156 td->td_retval[0] = low;
157 mtx_unlock(&Giant);
158 return (error);
159}
160
161#ifndef _SYS_SYSPROTO_H_
162struct setpriority_args {
163 int which;
164 int who;
165 int prio;
166};
167#endif
168/*
169 * MPSAFE
170 */
171/* ARGSUSED */
172int
173setpriority(td, uap)
174 struct thread *td;
175 register struct setpriority_args *uap;
176{
177 struct proc *curp = td->td_proc;
178 register struct proc *p;
179 int found = 0, error = 0;
180
181 mtx_lock(&Giant);
182
183 switch (uap->which) {
184 case PRIO_PROCESS:
185 if (uap->who == 0) {
186 PROC_LOCK(curp);
187 error = donice(td, curp, uap->prio);
188 PROC_UNLOCK(curp);
189 } else {
190 p = pfind(uap->who);
191 if (p == 0)
192 break;
193 if (p_cansee(td, p) == 0)
194 error = donice(td, p, uap->prio);
195 PROC_UNLOCK(p);
196 }
197 found++;
198 break;
199
200 case PRIO_PGRP: {
201 register struct pgrp *pg;
202
203 sx_slock(&proctree_lock);
204 if (uap->who == 0) {
205 pg = curp->p_pgrp;
206 PGRP_LOCK(pg);
207 } else {
208 pg = pgfind(uap->who);
209 if (pg == NULL) {
210 sx_sunlock(&proctree_lock);
211 break;
212 }
213 }
214 sx_sunlock(&proctree_lock);
215 LIST_FOREACH(p, &pg->pg_members, p_pglist) {
216 PROC_LOCK(p);
217 if (!p_cansee(td, p)) {
218 error = donice(td, p, uap->prio);
219 found++;
220 }
221 PROC_UNLOCK(p);
222 }
223 PGRP_UNLOCK(pg);
224 break;
225 }
226
227 case PRIO_USER:
228 if (uap->who == 0)
229 uap->who = td->td_ucred->cr_uid;
230 sx_slock(&allproc_lock);
231 FOREACH_PROC_IN_SYSTEM(p) {
232 PROC_LOCK(p);
233 if (p->p_ucred->cr_uid == uap->who &&
234 !p_cansee(td, p)) {
235 error = donice(td, p, uap->prio);
236 found++;
237 }
238 PROC_UNLOCK(p);
239 }
240 sx_sunlock(&allproc_lock);
241 break;
242
243 default:
244 error = EINVAL;
245 break;
246 }
247 if (found == 0 && error == 0)
248 error = ESRCH;
249 mtx_unlock(&Giant);
250 return (error);
251}
252
253static int
254donice(td, chgp, n)
255 struct thread *td;
256 register struct proc *chgp;
257 register int n;
258{
259 int error;
260
261 PROC_LOCK_ASSERT(chgp, MA_OWNED);
262 if ((error = p_cansched(td, chgp)))
263 return (error);
264 if (n > PRIO_MAX)
265 n = PRIO_MAX;
266 if (n < PRIO_MIN)
267 n = PRIO_MIN;
268 if (n < chgp->p_ksegrp.kg_nice /* XXXKSE */ && suser(td))
269 return (EACCES);
270 chgp->p_ksegrp.kg_nice /* XXXKSE */ = n;
271 (void)resetpriority(&chgp->p_ksegrp); /* XXXKSE */
272 return (0);
273}
274
275/* rtprio system call */
276#ifndef _SYS_SYSPROTO_H_
277struct rtprio_args {
278 int function;
279 pid_t pid;
280 struct rtprio *rtp;
281};
282#endif
283
284/*
285 * Set realtime priority
286 */
287
288/*
289 * MPSAFE
290 */
291/* ARGSUSED */
292int
293rtprio(td, uap)
294 struct thread *td;
295 register struct rtprio_args *uap;
296{
297 struct proc *curp = td->td_proc;
298 register struct proc *p;
299 struct rtprio rtp;
300 int error, cierror = 0;
301
302 /* Perform copyin before acquiring locks if needed. */
303 if (uap->function == RTP_SET)
304 cierror = copyin(uap->rtp, &rtp, sizeof(struct rtprio));
305
306 if (uap->pid == 0) {
307 p = curp;
308 PROC_LOCK(p);
309 } else {
310 p = pfind(uap->pid);
311 if (p == NULL)
312 return (ESRCH);
313 }
314
315 switch (uap->function) {
316 case RTP_LOOKUP:
317 if ((error = p_cansee(td, p)))
318 break;
319 mtx_lock_spin(&sched_lock);
320 pri_to_rtp(&p->p_ksegrp /* XXXKSE */ , &rtp);
321 mtx_unlock_spin(&sched_lock);
322 PROC_UNLOCK(p);
323 return (copyout(&rtp, uap->rtp, sizeof(struct rtprio)));
324 case RTP_SET:
325 if ((error = p_cansched(td, p)) || (error = cierror))
326 break;
327 /* disallow setting rtprio in most cases if not superuser */
328 if (suser(td) != 0) {
329 /* can't set someone else's */
330 if (uap->pid) {
331 error = EPERM;
332 break;
333 }
334 /* can't set realtime priority */
335/*
336 * Realtime priority has to be restricted for reasons which should be
337 * obvious. However, for idle priority, there is a potential for
338 * system deadlock if an idleprio process gains a lock on a resource
339 * that other processes need (and the idleprio process can't run
340 * due to a CPU-bound normal process). Fix me! XXX
341 */
342#if 0
343 if (RTP_PRIO_IS_REALTIME(rtp.type))
344#endif
345 if (rtp.type != RTP_PRIO_NORMAL) {
346 error = EPERM;
347 break;
348 }
349 }
350 mtx_lock_spin(&sched_lock);
351 error = rtp_to_pri(&rtp, &p->p_ksegrp);
352 mtx_unlock_spin(&sched_lock);
353 break;
354 default:
355 error = EINVAL;
356 break;
357 }
358 PROC_UNLOCK(p);
359 return (error);
360}
361
362int
363rtp_to_pri(struct rtprio *rtp, struct ksegrp *kg)
364{
365
366 if (rtp->prio > RTP_PRIO_MAX)
367 return (EINVAL);
368 switch (RTP_PRIO_BASE(rtp->type)) {
369 case RTP_PRIO_REALTIME:
370 kg->kg_user_pri = PRI_MIN_REALTIME + rtp->prio;
371 break;
372 case RTP_PRIO_NORMAL:
373 kg->kg_user_pri = PRI_MIN_TIMESHARE + rtp->prio;
374 break;
375 case RTP_PRIO_IDLE:
376 kg->kg_user_pri = PRI_MIN_IDLE + rtp->prio;
377 break;
378 default:
379 return (EINVAL);
380 }
381 kg->kg_pri_class = rtp->type;
382 if (curthread->td_ksegrp == kg) {
383 curthread->td_base_pri = kg->kg_user_pri;
384 curthread->td_priority = kg->kg_user_pri; /* XXX dubious */
385 }
386 return (0);
387}
388
389void
390pri_to_rtp(struct ksegrp *kg, struct rtprio *rtp)
391{
392
393 switch (PRI_BASE(kg->kg_pri_class)) {
394 case PRI_REALTIME:
395 rtp->prio = kg->kg_user_pri - PRI_MIN_REALTIME;
396 break;
397 case PRI_TIMESHARE:
398 rtp->prio = kg->kg_user_pri - PRI_MIN_TIMESHARE;
399 break;
400 case PRI_IDLE:
401 rtp->prio = kg->kg_user_pri - PRI_MIN_IDLE;
402 break;
403 default:
404 break;
405 }
406 rtp->type = kg->kg_pri_class;
407}
408
409#if defined(COMPAT_43) || defined(COMPAT_SUNOS)
410#ifndef _SYS_SYSPROTO_H_
411struct osetrlimit_args {
412 u_int which;
413 struct orlimit *rlp;
414};
415#endif
416/*
417 * MPSAFE
418 */
419/* ARGSUSED */
420int
421osetrlimit(td, uap)
422 struct thread *td;
423 register struct osetrlimit_args *uap;
424{
425 struct orlimit olim;
426 struct rlimit lim;
427 int error;
428
429 if ((error =
430 copyin((caddr_t)uap->rlp, (caddr_t)&olim, sizeof(struct orlimit))))
429 if ((error = copyin(uap->rlp, &olim, sizeof(struct orlimit))))
431 return (error);
432 lim.rlim_cur = olim.rlim_cur;
433 lim.rlim_max = olim.rlim_max;
434 mtx_lock(&Giant);
435 error = dosetrlimit(td, uap->which, &lim);
436 mtx_unlock(&Giant);
437 return (error);
438}
439
440#ifndef _SYS_SYSPROTO_H_
441struct ogetrlimit_args {
442 u_int which;
443 struct orlimit *rlp;
444};
445#endif
446/*
447 * MPSAFE
448 */
449/* ARGSUSED */
450int
451ogetrlimit(td, uap)
452 struct thread *td;
453 register struct ogetrlimit_args *uap;
454{
455 struct proc *p = td->td_proc;
456 struct orlimit olim;
457 int error;
458
459 if (uap->which >= RLIM_NLIMITS)
460 return (EINVAL);
461 mtx_lock(&Giant);
462 olim.rlim_cur = p->p_rlimit[uap->which].rlim_cur;
463 if (olim.rlim_cur == -1)
464 olim.rlim_cur = 0x7fffffff;
465 olim.rlim_max = p->p_rlimit[uap->which].rlim_max;
466 if (olim.rlim_max == -1)
467 olim.rlim_max = 0x7fffffff;
468 error = copyout((caddr_t)&olim, (caddr_t)uap->rlp, sizeof(olim));
467 error = copyout(&olim, uap->rlp, sizeof(olim));
469 mtx_unlock(&Giant);
470 return (error);
471}
472#endif /* COMPAT_43 || COMPAT_SUNOS */
473
474#ifndef _SYS_SYSPROTO_H_
475struct __setrlimit_args {
476 u_int which;
477 struct rlimit *rlp;
478};
479#endif
480/*
481 * MPSAFE
482 */
483/* ARGSUSED */
484int
485setrlimit(td, uap)
486 struct thread *td;
487 register struct __setrlimit_args *uap;
488{
489 struct rlimit alim;
490 int error;
491
492 if ((error =
493 copyin((caddr_t)uap->rlp, (caddr_t)&alim, sizeof (struct rlimit))))
491 if ((error = copyin(uap->rlp, &alim, sizeof (struct rlimit))))
494 return (error);
495 mtx_lock(&Giant);
496 error = dosetrlimit(td, uap->which, &alim);
497 mtx_unlock(&Giant);
498 return (error);
499}
500
501int
502dosetrlimit(td, which, limp)
503 struct thread *td;
504 u_int which;
505 struct rlimit *limp;
506{
507 struct proc *p = td->td_proc;
508 register struct rlimit *alimp;
509 int error;
510
511 GIANT_REQUIRED;
512
513 if (which >= RLIM_NLIMITS)
514 return (EINVAL);
515 alimp = &p->p_rlimit[which];
516
517 /*
518 * Preserve historical bugs by treating negative limits as unsigned.
519 */
520 if (limp->rlim_cur < 0)
521 limp->rlim_cur = RLIM_INFINITY;
522 if (limp->rlim_max < 0)
523 limp->rlim_max = RLIM_INFINITY;
524
525 if (limp->rlim_cur > alimp->rlim_max ||
526 limp->rlim_max > alimp->rlim_max)
527 if ((error = suser_cred(td->td_ucred, PRISON_ROOT)))
528 return (error);
529 if (limp->rlim_cur > limp->rlim_max)
530 limp->rlim_cur = limp->rlim_max;
531 if (p->p_limit->p_refcnt > 1 &&
532 (p->p_limit->p_lflags & PL_SHAREMOD) == 0) {
533 p->p_limit->p_refcnt--;
534 p->p_limit = limcopy(p->p_limit);
535 alimp = &p->p_rlimit[which];
536 }
537
538 switch (which) {
539
540 case RLIMIT_CPU:
541 if (limp->rlim_cur > RLIM_INFINITY / (rlim_t)1000000)
542 p->p_limit->p_cpulimit = RLIM_INFINITY;
543 else
544 p->p_limit->p_cpulimit =
545 (rlim_t)1000000 * limp->rlim_cur;
546 break;
547 case RLIMIT_DATA:
548 if (limp->rlim_cur > maxdsiz)
549 limp->rlim_cur = maxdsiz;
550 if (limp->rlim_max > maxdsiz)
551 limp->rlim_max = maxdsiz;
552 break;
553
554 case RLIMIT_STACK:
555 if (limp->rlim_cur > maxssiz)
556 limp->rlim_cur = maxssiz;
557 if (limp->rlim_max > maxssiz)
558 limp->rlim_max = maxssiz;
559 /*
560 * Stack is allocated to the max at exec time with only
561 * "rlim_cur" bytes accessible. If stack limit is going
562 * up make more accessible, if going down make inaccessible.
563 */
564 if (limp->rlim_cur != alimp->rlim_cur) {
565 vm_offset_t addr;
566 vm_size_t size;
567 vm_prot_t prot;
568
569 if (limp->rlim_cur > alimp->rlim_cur) {
570 prot = VM_PROT_ALL;
571 size = limp->rlim_cur - alimp->rlim_cur;
572 addr = USRSTACK - limp->rlim_cur;
573 } else {
574 prot = VM_PROT_NONE;
575 size = alimp->rlim_cur - limp->rlim_cur;
576 addr = USRSTACK - alimp->rlim_cur;
577 }
578 addr = trunc_page(addr);
579 size = round_page(size);
580 (void) vm_map_protect(&p->p_vmspace->vm_map,
581 addr, addr+size, prot, FALSE);
582 }
583 break;
584
585 case RLIMIT_NOFILE:
586 if (limp->rlim_cur > maxfilesperproc)
587 limp->rlim_cur = maxfilesperproc;
588 if (limp->rlim_max > maxfilesperproc)
589 limp->rlim_max = maxfilesperproc;
590 break;
591
592 case RLIMIT_NPROC:
593 if (limp->rlim_cur > maxprocperuid)
594 limp->rlim_cur = maxprocperuid;
595 if (limp->rlim_max > maxprocperuid)
596 limp->rlim_max = maxprocperuid;
597 if (limp->rlim_cur < 1)
598 limp->rlim_cur = 1;
599 if (limp->rlim_max < 1)
600 limp->rlim_max = 1;
601 break;
602 }
603 *alimp = *limp;
604 return (0);
605}
606
607#ifndef _SYS_SYSPROTO_H_
608struct __getrlimit_args {
609 u_int which;
610 struct rlimit *rlp;
611};
612#endif
613/*
614 * MPSAFE
615 */
616/* ARGSUSED */
617int
618getrlimit(td, uap)
619 struct thread *td;
620 register struct __getrlimit_args *uap;
621{
622 int error;
623 struct proc *p = td->td_proc;
624
625 if (uap->which >= RLIM_NLIMITS)
626 return (EINVAL);
627 mtx_lock(&Giant);
628 error = copyout((caddr_t)&p->p_rlimit[uap->which], (caddr_t)uap->rlp,
626 error = copyout(&p->p_rlimit[uap->which], uap->rlp,
629 sizeof (struct rlimit));
630 mtx_unlock(&Giant);
631 return(error);
632}
633
634/*
635 * Transform the running time and tick information in proc p into user,
636 * system, and interrupt time usage.
637 */
638void
639calcru(p, up, sp, ip)
640 struct proc *p;
641 struct timeval *up;
642 struct timeval *sp;
643 struct timeval *ip;
644{
645 /* {user, system, interrupt, total} {ticks, usec}; previous tu: */
646 u_int64_t ut, uu, st, su, it, iu, tt, tu, ptu;
647 u_int64_t uut = 0, sut = 0, iut = 0;
648 int s;
649 struct timeval tv;
650 struct bintime bt;
651 struct kse *ke;
652 struct ksegrp *kg;
653
654 mtx_assert(&sched_lock, MA_OWNED);
655 /* XXX: why spl-protect ? worst case is an off-by-one report */
656
657 FOREACH_KSEGRP_IN_PROC(p, kg) {
658 /* we could accumulate per ksegrp and per process here*/
659 FOREACH_KSE_IN_GROUP(kg, ke) {
660 s = splstatclock();
661 ut = ke->ke_uticks;
662 st = ke->ke_sticks;
663 it = ke->ke_iticks;
664 splx(s);
665
666 tt = ut + st + it;
667 if (tt == 0) {
668 st = 1;
669 tt = 1;
670 }
671
672 if (ke == curthread->td_kse) {
673 /*
674 * Adjust for the current time slice. This is actually fairly
675 * important since the error here is on the order of a time
676 * quantum, which is much greater than the sampling error.
677 * XXXKSE use a different test due to threads on other
678 * processors also being 'current'.
679 */
680
681 binuptime(&bt);
682 bintime_sub(&bt, PCPU_PTR(switchtime));
683 bintime_add(&bt, &p->p_runtime);
684 } else {
685 bt = p->p_runtime;
686 }
687 bintime2timeval(&bt, &tv);
688 tu = (u_int64_t)tv.tv_sec * 1000000 + tv.tv_usec;
689 ptu = ke->ke_uu + ke->ke_su + ke->ke_iu;
690 if (tu < ptu || (int64_t)tu < 0) {
691 /* XXX no %qd in kernel. Truncate. */
692 printf("calcru: negative time of %ld usec for pid %d (%s)\n",
693 (long)tu, p->p_pid, p->p_comm);
694 tu = ptu;
695 }
696
697 /* Subdivide tu. */
698 uu = (tu * ut) / tt;
699 su = (tu * st) / tt;
700 iu = tu - uu - su;
701
702 /* Enforce monotonicity. */
703 if (uu < ke->ke_uu || su < ke->ke_su || iu < ke->ke_iu) {
704 if (uu < ke->ke_uu)
705 uu = ke->ke_uu;
706 else if (uu + ke->ke_su + ke->ke_iu > tu)
707 uu = tu - ke->ke_su - ke->ke_iu;
708 if (st == 0)
709 su = ke->ke_su;
710 else {
711 su = ((tu - uu) * st) / (st + it);
712 if (su < ke->ke_su)
713 su = ke->ke_su;
714 else if (uu + su + ke->ke_iu > tu)
715 su = tu - uu - ke->ke_iu;
716 }
717 KASSERT(uu + su + ke->ke_iu <= tu,
718 ("calcru: monotonisation botch 1"));
719 iu = tu - uu - su;
720 KASSERT(iu >= ke->ke_iu,
721 ("calcru: monotonisation botch 2"));
722 }
723 ke->ke_uu = uu;
724 ke->ke_su = su;
725 ke->ke_iu = iu;
726 uut += uu;
727 sut += su;
728 iut += iu;
729
730 } /* end kse loop */
731 } /* end kseg loop */
732 up->tv_sec = uut / 1000000;
733 up->tv_usec = uut % 1000000;
734 sp->tv_sec = sut / 1000000;
735 sp->tv_usec = sut % 1000000;
736 if (ip != NULL) {
737 ip->tv_sec = iut / 1000000;
738 ip->tv_usec = iut % 1000000;
739 }
740}
741
742#ifndef _SYS_SYSPROTO_H_
743struct getrusage_args {
744 int who;
745 struct rusage *rusage;
746};
747#endif
748/*
749 * MPSAFE
750 */
751/* ARGSUSED */
752int
753getrusage(td, uap)
754 register struct thread *td;
755 register struct getrusage_args *uap;
756{
757 struct proc *p = td->td_proc;
758 register struct rusage *rup;
759 int error = 0;
760
761 mtx_lock(&Giant);
762
763 switch (uap->who) {
764 case RUSAGE_SELF:
765 rup = &p->p_stats->p_ru;
766 mtx_lock_spin(&sched_lock);
767 calcru(p, &rup->ru_utime, &rup->ru_stime, NULL);
768 mtx_unlock_spin(&sched_lock);
769 break;
770
771 case RUSAGE_CHILDREN:
772 rup = &p->p_stats->p_cru;
773 break;
774
775 default:
776 rup = NULL;
777 error = EINVAL;
778 break;
779 }
780 mtx_unlock(&Giant);
781 if (error == 0) {
782 error = copyout((caddr_t)rup, (caddr_t)uap->rusage,
783 sizeof (struct rusage));
780 error = copyout(rup, uap->rusage, sizeof (struct rusage));
784 }
785 return(error);
786}
787
788void
789ruadd(ru, ru2)
790 register struct rusage *ru, *ru2;
791{
792 register long *ip, *ip2;
793 register int i;
794
795 timevaladd(&ru->ru_utime, &ru2->ru_utime);
796 timevaladd(&ru->ru_stime, &ru2->ru_stime);
797 if (ru->ru_maxrss < ru2->ru_maxrss)
798 ru->ru_maxrss = ru2->ru_maxrss;
799 ip = &ru->ru_first; ip2 = &ru2->ru_first;
800 for (i = &ru->ru_last - &ru->ru_first; i >= 0; i--)
801 *ip++ += *ip2++;
802}
803
804/*
805 * Make a copy of the plimit structure.
806 * We share these structures copy-on-write after fork,
807 * and copy when a limit is changed.
808 */
809struct plimit *
810limcopy(lim)
811 struct plimit *lim;
812{
813 register struct plimit *copy;
814
815 MALLOC(copy, struct plimit *, sizeof(struct plimit),
816 M_SUBPROC, M_WAITOK);
817 bcopy(lim->pl_rlimit, copy->pl_rlimit, sizeof(struct plimit));
818 copy->p_lflags = 0;
819 copy->p_refcnt = 1;
820 return (copy);
821}
822
823/*
824 * Find the uidinfo structure for a uid. This structure is used to
825 * track the total resource consumption (process count, socket buffer
826 * size, etc.) for the uid and impose limits.
827 */
828void
829uihashinit()
830{
831
832 uihashtbl = hashinit(maxproc / 16, M_UIDINFO, &uihash);
833 mtx_init(&uihashtbl_mtx, "uidinfo hash", NULL, MTX_DEF);
834}
835
836/*
837 * lookup a uidinfo struct for the parameter uid.
838 * uihashtbl_mtx must be locked.
839 */
840static struct uidinfo *
841uilookup(uid)
842 uid_t uid;
843{
844 struct uihashhead *uipp;
845 struct uidinfo *uip;
846
847 mtx_assert(&uihashtbl_mtx, MA_OWNED);
848 uipp = UIHASH(uid);
849 LIST_FOREACH(uip, uipp, ui_hash)
850 if (uip->ui_uid == uid)
851 break;
852
853 return (uip);
854}
855
856/*
857 * Find or allocate a struct uidinfo for a particular uid.
858 * Increase refcount on uidinfo struct returned.
859 * uifree() should be called on a struct uidinfo when released.
860 */
861struct uidinfo *
862uifind(uid)
863 uid_t uid;
864{
865 struct uidinfo *uip;
866
867 mtx_lock(&uihashtbl_mtx);
868 uip = uilookup(uid);
869 if (uip == NULL) {
870 struct uidinfo *old_uip;
871
872 mtx_unlock(&uihashtbl_mtx);
873 uip = malloc(sizeof(*uip), M_UIDINFO, M_WAITOK | M_ZERO);
874 mtx_lock(&uihashtbl_mtx);
875 /*
876 * There's a chance someone created our uidinfo while we
877 * were in malloc and not holding the lock, so we have to
878 * make sure we don't insert a duplicate uidinfo
879 */
880 if ((old_uip = uilookup(uid)) != NULL) {
881 /* someone else beat us to it */
882 free(uip, M_UIDINFO);
883 uip = old_uip;
884 } else {
885 uip->ui_mtxp = mtx_pool_alloc();
886 uip->ui_uid = uid;
887 LIST_INSERT_HEAD(UIHASH(uid), uip, ui_hash);
888 }
889 }
890 uihold(uip);
891 mtx_unlock(&uihashtbl_mtx);
892 return (uip);
893}
894
895/*
896 * Place another refcount on a uidinfo struct.
897 */
898void
899uihold(uip)
900 struct uidinfo *uip;
901{
902
903 UIDINFO_LOCK(uip);
904 uip->ui_ref++;
905 UIDINFO_UNLOCK(uip);
906}
907
908/*-
909 * Since uidinfo structs have a long lifetime, we use an
910 * opportunistic refcounting scheme to avoid locking the lookup hash
911 * for each release.
912 *
913 * If the refcount hits 0, we need to free the structure,
914 * which means we need to lock the hash.
915 * Optimal case:
916 * After locking the struct and lowering the refcount, if we find
917 * that we don't need to free, simply unlock and return.
918 * Suboptimal case:
919 * If refcount lowering results in need to free, bump the count
 920 * back up, lose the lock and acquire the locks in the proper
921 * order to try again.
922 */
923void
924uifree(uip)
925 struct uidinfo *uip;
926{
927
928 /* Prepare for optimal case. */
929 UIDINFO_LOCK(uip);
930
931 if (--uip->ui_ref != 0) {
932 UIDINFO_UNLOCK(uip);
933 return;
934 }
935
936 /* Prepare for suboptimal case. */
937 uip->ui_ref++;
938 UIDINFO_UNLOCK(uip);
939 mtx_lock(&uihashtbl_mtx);
940 UIDINFO_LOCK(uip);
941
942 /*
943 * We must subtract one from the count again because we backed out
944 * our initial subtraction before dropping the lock.
945 * Since another thread may have added a reference after we dropped the
946 * initial lock we have to test for zero again.
947 */
948 if (--uip->ui_ref == 0) {
949 LIST_REMOVE(uip, ui_hash);
950 mtx_unlock(&uihashtbl_mtx);
951 if (uip->ui_sbsize != 0)
952 /* XXX no %qd in kernel. Truncate. */
953 printf("freeing uidinfo: uid = %d, sbsize = %ld\n",
954 uip->ui_uid, (long)uip->ui_sbsize);
955 if (uip->ui_proccnt != 0)
956 printf("freeing uidinfo: uid = %d, proccnt = %ld\n",
957 uip->ui_uid, uip->ui_proccnt);
958 UIDINFO_UNLOCK(uip);
959 FREE(uip, M_UIDINFO);
960 return;
961 }
962
963 mtx_unlock(&uihashtbl_mtx);
964 UIDINFO_UNLOCK(uip);
965}
966
967/*
968 * Change the count associated with number of processes
969 * a given user is using. When 'max' is 0, don't enforce a limit
970 */
971int
972chgproccnt(uip, diff, max)
973 struct uidinfo *uip;
974 int diff;
975 int max;
976{
977
978 UIDINFO_LOCK(uip);
979 /* don't allow them to exceed max, but allow subtraction */
980 if (diff > 0 && uip->ui_proccnt + diff > max && max != 0) {
981 UIDINFO_UNLOCK(uip);
982 return (0);
983 }
984 uip->ui_proccnt += diff;
985 if (uip->ui_proccnt < 0)
986 printf("negative proccnt for uid = %d\n", uip->ui_uid);
987 UIDINFO_UNLOCK(uip);
988 return (1);
989}
990
991/*
992 * Change the total socket buffer size a user has used.
993 */
994int
995chgsbsize(uip, hiwat, to, max)
996 struct uidinfo *uip;
997 u_long *hiwat;
998 u_long to;
999 rlim_t max;
1000{
1001 rlim_t new;
1002 int s;
1003
1004 s = splnet();
1005 UIDINFO_LOCK(uip);
1006 new = uip->ui_sbsize + to - *hiwat;
1007 /* don't allow them to exceed max, but allow subtraction */
1008 if (to > *hiwat && new > max) {
1009 splx(s);
1010 UIDINFO_UNLOCK(uip);
1011 return (0);
1012 }
1013 uip->ui_sbsize = new;
1014 *hiwat = to;
1015 if (uip->ui_sbsize < 0)
1016 printf("negative sbsize for uid = %d\n", uip->ui_uid);
1017 splx(s);
1018 UIDINFO_UNLOCK(uip);
1019 return (1);
1020}