1/*
2 * Copyright (c) 2000-2008 Apple Inc. All rights reserved.
3 *
4 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
5 *
6 * This file contains Original Code and/or Modifications of Original Code
7 * as defined in and that are subject to the Apple Public Source License
8 * Version 2.0 (the 'License'). You may not use this file except in
9 * compliance with the License. The rights granted to you under the License
10 * may not be used to create, or enable the creation or redistribution of,
11 * unlawful or unlicensed copies of an Apple operating system, or to
12 * circumvent, violate, or enable the circumvention or violation of, any
13 * terms of an Apple operating system software license agreement.
14 *
15 * Please obtain a copy of the License at
16 * http://www.opensource.apple.com/apsl/ and read it before using this file.
17 *
18 * The Original Code and all software distributed under the License are
19 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23 * Please see the License for the specific language governing rights and
24 * limitations under the License.
25 *
26 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27 */
28/* Copyright (c) 1995, 1997 Apple Computer, Inc. All Rights Reserved */
29/*-
30 * Copyright (c) 1982, 1986, 1991, 1993
31 *	The Regents of the University of California.  All rights reserved.
32 * (c) UNIX System Laboratories, Inc.
33 * All or some portions of this file are derived from material licensed
34 * to the University of California by American Telephone and Telegraph
35 * Co. or Unix System Laboratories, Inc. and are reproduced herein with
36 * the permission of UNIX System Laboratories, Inc.
37 *
38 * Redistribution and use in source and binary forms, with or without
39 * modification, are permitted provided that the following conditions
40 * are met:
41 * 1. Redistributions of source code must retain the above copyright
42 *    notice, this list of conditions and the following disclaimer.
43 * 2. Redistributions in binary form must reproduce the above copyright
44 *    notice, this list of conditions and the following disclaimer in the
45 *    documentation and/or other materials provided with the distribution.
46 * 3. All advertising materials mentioning features or use of this software
47 *    must display the following acknowledgement:
48 *	This product includes software developed by the University of
49 *	California, Berkeley and its contributors.
50 * 4. Neither the name of the University nor the names of its contributors
51 *    may be used to endorse or promote products derived from this software
52 *    without specific prior written permission.
53 *
54 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
55 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
56 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
57 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
58 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
59 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
60 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
61 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
62 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
63 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
64 * SUCH DAMAGE.
65 *
66 *	@(#)kern_resource.c	8.5 (Berkeley) 1/21/94
67 */
68/*
69 * NOTICE: This file was modified by SPARTA, Inc. in 2005 to introduce
70 * support for mandatory and extensible security protections.  This notice
71 * is included in support of clause 2.2 (b) of the Apple Public License,
72 * Version 2.0.
73 */
74
75#include <sys/param.h>
76#include <sys/systm.h>
77#include <sys/sysctl.h>
78#include <sys/kernel.h>
79#include <sys/file_internal.h>
80#include <sys/resourcevar.h>
81#include <sys/malloc.h>
82#include <sys/proc_internal.h>
83#include <sys/kauth.h>
84#include <machine/spl.h>
85
86#include <sys/mount_internal.h>
87#include <sys/sysproto.h>
88
89#include <security/audit/audit.h>
90
91#include <machine/vmparam.h>
92
93#include <mach/mach_types.h>
94#include <mach/time_value.h>
95#include <mach/task.h>
96#include <mach/task_info.h>
97#include <mach/vm_map.h>
98#include <mach/mach_vm.h>
99#include <mach/thread_act.h>  /* for thread_policy_set( ) */
100#include <kern/lock.h>
101#include <kern/thread.h>
102
103#include <kern/task.h>
104#include <kern/clock.h>		/* for absolutetime_to_microtime() */
105#include <netinet/in.h>		/* for TRAFFIC_MGT_SO_* */
106#include <sys/socketvar.h>	/* for struct socket */
107
108#include <vm/vm_map.h>
109
110#include <kern/assert.h>
111#include <sys/resource.h>
112
113int	donice(struct proc *curp, struct proc *chgp, int n);
114int	dosetrlimit(struct proc *p, u_int which, struct rlimit *limp);
115int	uthread_get_background_state(uthread_t);
116static void do_background_socket(struct proc *p, thread_t thread, int priority);
117static int do_background_thread(struct proc *curp, thread_t thread, int priority);
118static int do_background_proc(struct proc *curp, struct proc *targetp, int priority);
119static int get_background_proc(struct proc *curp, struct proc *targetp, int *priority);
120void proc_apply_task_networkbg_internal(proc_t, thread_t);
121void proc_restore_task_networkbg_internal(proc_t, thread_t);
122int proc_pid_rusage(int pid, int flavor, user_addr_t buf, int32_t *retval);
123void gather_rusage_info_v2(proc_t p, struct rusage_info_v2 *ru, int flavor);
124int fill_task_rusage_v2(task_t task, struct rusage_info_v2 *ri);
125static void rusage_info_v2_to_v0(struct rusage_info_v0 *ri_v0, struct rusage_info_v2 *ri_v2);
126static void rusage_info_v2_to_v1(struct rusage_info_v1 *ri_v1, struct rusage_info_v2 *ri_v2);
127
128int proc_get_rusage(proc_t p, int flavor, user_addr_t buffer, __unused int is_zombie);
129
130rlim_t maxdmap = MAXDSIZ;	/* XXX */
131rlim_t maxsmap = MAXSSIZ - PAGE_SIZE;	/* XXX */
132
133/*
134 * Limits on the number of open files per process, and the number
135 * of child processes per process.
136 *
137 * Note: would be in kern/subr_param.c in FreeBSD.
138 */
139__private_extern__ int maxfilesperproc = OPEN_MAX;		/* per-proc open files limit */
140
141SYSCTL_INT(_kern, KERN_MAXPROCPERUID, maxprocperuid, CTLFLAG_RW | CTLFLAG_LOCKED,
142    		&maxprocperuid, 0, "Maximum processes allowed per userid" );
143
144SYSCTL_INT(_kern, KERN_MAXFILESPERPROC, maxfilesperproc, CTLFLAG_RW | CTLFLAG_LOCKED,
145    		&maxfilesperproc, 0, "Maximum files allowed open per process" );
146
147/* Args and fn for proc_iteration callback used in setpriority */
struct puser_nice_args {
	proc_t curp;	/* process issuing setpriority() (passed to donice()) */
	int	prio;	/* nice value handed to donice() */
	id_t	who;	/* target uid; compared against each proc's effective uid */
	int *	foundp;	/* out: incremented once per matching process */
	int *	errorp;	/* out: last donice() return value */
};
static int puser_donice_callback(proc_t p, void * arg);
156
157
158/* Args and fn for proc_iteration callback used in setpriority */
struct ppgrp_nice_args {
	proc_t curp;	/* process issuing setpriority() (passed to donice()) */
	int	prio;	/* nice value handed to donice() */
	int *	foundp;	/* out: incremented once per group member visited */
	int *	errorp;	/* out: last donice() return value */
};
static int ppgrp_donice_callback(proc_t p, void * arg);
166
167/*
168 * Resource controls and accounting.
169 */
/*
 * getpriority(): report the minimum (most favorable) nice value among the
 * processes selected by uap->which / uap->who, or the Darwin background
 * policy state for the PRIO_DARWIN_* variants.  Result goes in *retval.
 *
 * Returns:	0		Success
 *		EINVAL		Bad which/who
 *		ESRCH		No process matched the selector
 */
int
getpriority(struct proc *curp, struct getpriority_args *uap, int32_t *retval)
{
	struct proc *p;
	int low = PRIO_MAX + 1;	/* sentinel meaning "nothing found yet" */
	kauth_cred_t my_cred;
	int refheld = 0;
	int error = 0;

	/* would also test (uap->who < 0), but id_t is unsigned */
	if (uap->who > 0x7fffffff)
		return (EINVAL);

	switch (uap->which) {

	case PRIO_PROCESS:
		/* single process: who == 0 means the caller itself */
		if (uap->who == 0) {
			p = curp;
			low = p->p_nice;
		} else {
			p = proc_find(uap->who);
			if (p == 0)
				break;
			low = p->p_nice;
			proc_rele(p);

		}
		break;

	case PRIO_PGRP: {
		struct pgrp *pg = PGRP_NULL;

		if (uap->who == 0) {
			/* returns the pgrp to ref */
			pg = proc_pgrp(curp);
		 } else if ((pg = pgfind(uap->who)) == PGRP_NULL) {
			break;
		}
		/* No need for iteration as it is a simple scan */
		pgrp_lock(pg);
		for (p = pg->pg_members.lh_first; p != 0; p = p->p_pglist.le_next) {
			if (p->p_nice < low)
				low = p->p_nice;
		}
		pgrp_unlock(pg);
		pg_rele(pg);
		break;
	}

	case PRIO_USER:
		/* who == 0 means the caller's own effective uid */
		if (uap->who == 0)
			uap->who = kauth_cred_getuid(kauth_cred_get());

		proc_list_lock();

		/* scan every process, tracking the lowest nice for that uid */
		for (p = allproc.lh_first; p != 0; p = p->p_list.le_next) {
			my_cred = kauth_cred_proc_ref(p);
			if (kauth_cred_getuid(my_cred) == uap->who &&
			    p->p_nice < low)
				low = p->p_nice;
			kauth_cred_unref(&my_cred);
		}

		proc_list_unlock();

		break;

	case PRIO_DARWIN_THREAD:
		/* we currently only support the current thread */
		if (uap->who != 0)
			return (EINVAL);

		/* report the calling thread's internally-set DARWIN_BG state */
		low = proc_get_task_policy(current_task(), current_thread(), TASK_POLICY_INTERNAL, TASK_POLICY_DARWIN_BG);

		break;

	case PRIO_DARWIN_PROCESS:
		if (uap->who == 0) {
			p = curp;
		} else {
			p = proc_find(uap->who);
			if (p == PROC_NULL)
				break;
			refheld = 1;	/* proc_find() took a ref; drop it below */
		}

		error = get_background_proc(curp, p, &low);

		if (refheld)
			proc_rele(p);
		if (error)
			return (error);
		break;

	default:
		return (EINVAL);
	}
	/* sentinel never lowered => nothing matched the selector */
	if (low == PRIO_MAX + 1)
		return (ESRCH);
	*retval = low;
	return (0);
}
272
273/* call back function used for proc iteration in PRIO_USER */
274static int
275puser_donice_callback(proc_t p, void * arg)
276{
277	int error, n;
278	struct puser_nice_args * pun = (struct puser_nice_args *)arg;
279	kauth_cred_t my_cred;
280
281	my_cred = kauth_cred_proc_ref(p);
282	if (kauth_cred_getuid(my_cred) == pun->who) {
283		error = donice(pun->curp, p, pun->prio);
284		if (pun->errorp != NULL)
285			*pun->errorp = error;
286		if (pun->foundp != NULL) {
287			n = *pun->foundp;
288			*pun->foundp = n+1;
289		}
290	}
291	kauth_cred_unref(&my_cred);
292
293	return(PROC_RETURNED);
294}
295
296/* call back function used for proc iteration in PRIO_PGRP */
297static int
298ppgrp_donice_callback(proc_t p, void * arg)
299{
300	int error;
301	struct ppgrp_nice_args * pun = (struct ppgrp_nice_args *)arg;
302	int n;
303
304	error = donice(pun->curp, p, pun->prio);
305	if (pun->errorp != NULL)
306		*pun->errorp = error;
307	if (pun->foundp!= NULL) {
308		n = *pun->foundp;
309		*pun->foundp = n+1;
310	}
311
312	return(PROC_RETURNED);
313}
314
315/*
316 * Returns:	0			Success
317 *		EINVAL
318 *		ESRCH
319 *	donice:EPERM
320 *	donice:EACCES
321 */
322/* ARGSUSED */
int
setpriority(struct proc *curp, struct setpriority_args *uap, __unused int32_t *retval)
{
	struct proc *p;
	int found = 0, error = 0;	/* found counts processes we attempted to renice */
	int refheld = 0;

	AUDIT_ARG(cmd, uap->which);
	AUDIT_ARG(owner, uap->who, 0);
	AUDIT_ARG(value32, uap->prio);

	/* would also test (uap->who < 0), but id_t is unsigned */
	if (uap->who > 0x7fffffff)
		return (EINVAL);

	switch (uap->which) {

	case PRIO_PROCESS:
		/* who == 0 means the calling process itself */
		if (uap->who == 0)
			p = curp;
		else {
			p = proc_find(uap->who);
			if (p == 0)
				break;
			refheld = 1;	/* proc_find() took a ref; drop it below */
		}
		error = donice(curp, p, uap->prio);
		found++;
		if (refheld != 0)
			proc_rele(p);
		break;

	case PRIO_PGRP: {
		struct pgrp *pg = PGRP_NULL;
		struct ppgrp_nice_args ppgrp;

		if (uap->who == 0) {
			pg = proc_pgrp(curp);
		 } else if ((pg = pgfind(uap->who)) == PGRP_NULL)
			break;

		/* found/error are updated by the callback via these pointers */
		ppgrp.curp = curp;
		ppgrp.prio = uap->prio;
		ppgrp.foundp = &found;
		ppgrp.errorp = &error;

		/* PGRP_DROPREF drops the reference on process group */
		pgrp_iterate(pg, PGRP_DROPREF, ppgrp_donice_callback, (void *)&ppgrp, NULL, NULL);

		break;
	}

	case PRIO_USER: {
		struct puser_nice_args punice;

		/* who == 0 means the caller's own effective uid */
		if (uap->who == 0)
			uap->who = kauth_cred_getuid(kauth_cred_get());

		punice.curp = curp;
		punice.prio = uap->prio;
		punice.who = uap->who;
		punice.foundp = &found;
		error = 0;
		punice.errorp = &error;
		proc_iterate(PROC_ALLPROCLIST, puser_donice_callback, (void *)&punice, NULL, NULL);

		break;
	}

	case PRIO_DARWIN_THREAD: {
		/* we currently only support the current thread */
		if (uap->who != 0)
			return (EINVAL);

		error = do_background_thread(curp, current_thread(), uap->prio);
		found++;
		break;
	}

	case PRIO_DARWIN_PROCESS: {
		if (uap->who == 0)
			p = curp;
		else {
			p = proc_find(uap->who);
			if (p == 0)
				break;
			refheld = 1;	/* proc_find() took a ref; drop it below */
		}

		error = do_background_proc(curp, p, uap->prio);

		found++;
		if (refheld != 0)
			proc_rele(p);
		break;
	}

	default:
		return (EINVAL);
	}
	/* no process matched the selector at all */
	if (found == 0)
		return (ESRCH);
	return (error);
}
427
428
429/*
430 * Returns:	0			Success
431 *		EPERM
432 *		EACCES
433 *	mac_check_proc_sched:???
434 */
/*
 * donice(): set chgp's nice value to n on behalf of curp.
 * A non-superuser caller must share an effective or real uid with the
 * target, and lowering the nice value (raising priority) requires
 * superuser.  The requested value is clamped to [PRIO_MIN, PRIO_MAX].
 */
int
donice(struct proc *curp, struct proc *chgp, int n)
{
	int error = 0;
	kauth_cred_t ucred;	/* caller's credential */
	kauth_cred_t my_cred;	/* target's credential */

	ucred = kauth_cred_proc_ref(curp);
	my_cred = kauth_cred_proc_ref(chgp);

	/* non-root: must match the target's uid by effective or real uid */
	if (suser(ucred, NULL) && kauth_cred_getruid(ucred) &&
	    kauth_cred_getuid(ucred) != kauth_cred_getuid(my_cred) &&
	    kauth_cred_getruid(ucred) != kauth_cred_getuid(my_cred)) {
		error = EPERM;
		goto out;
	}
	/* clamp into the legal nice range */
	if (n > PRIO_MAX)
		n = PRIO_MAX;
	if (n < PRIO_MIN)
		n = PRIO_MIN;
	/* only the superuser may decrease a nice value */
	if (n < chgp->p_nice && suser(ucred, &curp->p_acflag)) {
		error = EACCES;
		goto out;
	}
#if CONFIG_MACF
	error = mac_proc_check_sched(curp, chgp);
	if (error)
		goto out;
#endif
	proc_lock(chgp);
	chgp->p_nice = n;
	proc_unlock(chgp);
	(void)resetpriority(chgp);
out:
	kauth_cred_unref(&ucred);
	kauth_cred_unref(&my_cred);
	return (error);
}
473
474static int
475get_background_proc(struct proc *curp, struct proc *targetp, int *priority)
476{
477	int external = 0;
478	int error = 0;
479	kauth_cred_t ucred, target_cred;
480
481	ucred = kauth_cred_get();
482	target_cred = kauth_cred_proc_ref(targetp);
483
484	if (!kauth_cred_issuser(ucred) && kauth_cred_getruid(ucred) &&
485	    kauth_cred_getuid(ucred) != kauth_cred_getuid(target_cred) &&
486	    kauth_cred_getruid(ucred) != kauth_cred_getuid(target_cred)) {
487		error = EPERM;
488		goto out;
489	}
490
491	external = (curp == targetp) ? TASK_POLICY_INTERNAL : TASK_POLICY_EXTERNAL;
492
493	*priority = proc_get_task_policy(current_task(), THREAD_NULL, external, TASK_POLICY_DARWIN_BG);
494
495out:
496	kauth_cred_unref(&target_cred);
497	return (error);
498}
499
/*
 * do_background_proc(): apply or clear Darwin background policy on
 * targetp's task on behalf of curp.  priority selects the action:
 *   PRIO_DARWIN_BG     enable DARWIN_BG
 *   PRIO_DARWIN_NONUI  enable GPU denial
 *   anything else      disable both (see comment in the switch)
 */
static int
do_background_proc(struct proc *curp, struct proc *targetp, int priority)
{
#if !CONFIG_MACF
#pragma unused(curp)
#endif
	int error = 0;
	kauth_cred_t ucred;
	kauth_cred_t target_cred;
	int external;	/* self-imposed (internal) vs. imposed by another proc */
	int flavor;
	int enable;

	ucred = kauth_cred_get();
	target_cred = kauth_cred_proc_ref(targetp);

	/* non-root: must match the target's uid by effective or real uid */
	if (!kauth_cred_issuser(ucred) && kauth_cred_getruid(ucred) &&
		kauth_cred_getuid(ucred) != kauth_cred_getuid(target_cred) &&
		kauth_cred_getruid(ucred) != kauth_cred_getuid(target_cred))
	{
		error = EPERM;
		goto out;
	}

#if CONFIG_MACF
	error = mac_proc_check_sched(curp, targetp);
	if (error)
		goto out;
#endif

	external = (curp == targetp) ? TASK_POLICY_INTERNAL : TASK_POLICY_EXTERNAL;

	switch (priority) {
		case PRIO_DARWIN_NONUI:
			flavor = TASK_POLICY_GPU_DENY;
			enable = TASK_POLICY_ENABLE;
			break;
		case PRIO_DARWIN_BG:
			flavor = TASK_POLICY_DARWIN_BG;
			enable = TASK_POLICY_ENABLE;
			break;
		default:
			/*
			 * DARWIN_BG and GPU_DENY disable are overloaded,
			 * so we need to turn them both off at the same time
			 *
			 * TODO: It would be nice to fail if priority != 0
			 */
			flavor = TASK_POLICY_DARWIN_BG_AND_GPU;
			enable = TASK_POLICY_DISABLE;
			break;
	}

	proc_set_task_policy(proc_task(targetp), THREAD_NULL, external, flavor, enable);

out:
	kauth_cred_unref(&target_cred);
	return (error);
}
559
/*
 * do_background_socket(): walk p's open-file table and set
 * (priority == PRIO_DARWIN_BG) or clear (any other priority) the
 * TRAFFIC_MGT_SO_BACKGROUND flag on every socket.  A non-NULL thread
 * restricts the clear pass to sockets owned by that background thread;
 * on the set side, a non-NULL thread means there is nothing to do.
 */
static void
do_background_socket(struct proc *p, thread_t thread, int priority)
{
#if SOCKETS
	struct filedesc                     *fdp;
	struct fileproc                     *fp;
	int                                 i;

	if (priority == PRIO_DARWIN_BG) {
		/*
		 * For PRIO_DARWIN_PROCESS (thread is NULL), simply mark
		 * the sockets with the background flag.  There's nothing
		 * to do here for the PRIO_DARWIN_THREAD case.
		 */
		if (thread == THREAD_NULL) {
			proc_fdlock(p);
			fdp = p->p_fd;

			for (i = 0; i < fdp->fd_nfiles; i++) {
				struct socket       *sockp;

				/* skip empty/reserved slots and non-socket files */
				fp = fdp->fd_ofiles[i];
				if (fp == NULL || (fdp->fd_ofileflags[i] & UF_RESERVED) != 0 ||
				    FILEGLOB_DTYPE(fp->f_fglob) != DTYPE_SOCKET) {
					continue;
				}
				sockp = (struct socket *)fp->f_fglob->fg_data;
				socket_set_traffic_mgt_flags(sockp, TRAFFIC_MGT_SO_BACKGROUND);
				sockp->so_background_thread = NULL;
			}
			proc_fdunlock(p);
		}

	} else {

		/* disable networking IO throttle.
		 * NOTE - It is a known limitation of the current design that we
		 * could potentially clear TRAFFIC_MGT_SO_BACKGROUND bit for
		 * sockets created by other threads within this process.
		 */
		proc_fdlock(p);
		fdp = p->p_fd;
		for ( i = 0; i < fdp->fd_nfiles; i++ ) {
			struct socket       *sockp;

			/* skip empty/reserved slots and non-socket files */
			fp = fdp->fd_ofiles[ i ];
			if ( fp == NULL || (fdp->fd_ofileflags[ i ] & UF_RESERVED) != 0 ||
			    FILEGLOB_DTYPE(fp->f_fglob) != DTYPE_SOCKET ) {
				continue;
			}
			sockp = (struct socket *)fp->f_fglob->fg_data;
			/* skip if only clearing this thread's sockets */
			if ((thread) && (sockp->so_background_thread != thread)) {
				continue;
			}
			socket_clear_traffic_mgt_flags(sockp, TRAFFIC_MGT_SO_BACKGROUND);
			sockp->so_background_thread = NULL;
		}
		proc_fdunlock(p);
	}
#else
#pragma unused(p, thread, priority)
#endif
}
624
625
626/*
627 * do_background_thread
628 * Returns:     0                       Success
629 *              EPERM                   Tried to background while in vfork
630 * XXX - todo - does this need a MACF hook?
631 */
632static int
633do_background_thread(struct proc *curp, thread_t thread, int priority)
634{
635	struct uthread *ut;
636	int enable, external;
637
638	ut = get_bsdthread_info(thread);
639
640	/* Backgrounding is unsupported for threads in vfork */
641	if ((ut->uu_flag & UT_VFORK) != 0)
642		return(EPERM);
643
644	/* TODO: Fail if someone passes something besides 0 or PRIO_DARWIN_BG */
645	enable   = (priority == PRIO_DARWIN_BG) ? TASK_POLICY_ENABLE   : TASK_POLICY_DISABLE;
646	external = (current_thread() == thread) ? TASK_POLICY_INTERNAL : TASK_POLICY_EXTERNAL;
647
648	proc_set_task_policy_thread(curp->task, thread_tid(thread), external,
649	                            TASK_POLICY_DARWIN_BG, enable);
650
651	return(0);
652}
653
654
655/*
656 * Returns:	0			Success
657 *	copyin:EFAULT
658 *	dosetrlimit:
659 */
660/* ARGSUSED */
661int
662setrlimit(struct proc *p, struct setrlimit_args *uap, __unused int32_t *retval)
663{
664	struct rlimit alim;
665	int error;
666
667	if ((error = copyin(uap->rlp, (caddr_t)&alim,
668	    sizeof (struct rlimit))))
669		return (error);
670
671	return (dosetrlimit(p, uap->which, &alim));
672}
673
674/*
675 * Returns:	0			Success
676 *		EINVAL
677 *		ENOMEM			Cannot copy limit structure
678 *	suser:EPERM
679 *
680 * Notes:	EINVAL is returned both for invalid arguments, and in the
681 *		case that the current usage (e.g. RLIMIT_STACK) is already
682 *		in excess of the requested limit.
683 */
int
dosetrlimit(struct proc *p, u_int which, struct rlimit *limp)
{
	struct rlimit *alimp;	/* currently-installed limit for `which' */
	int error;
	kern_return_t	kr;
	/* remember whether the caller asked for strict POSIX semantics */
	int posix = (which & _RLIMIT_POSIX_FLAG) ? 1 : 0;

	/* Mask out POSIX flag, saved above */
	which &= ~_RLIMIT_POSIX_FLAG;

	if (which >= RLIM_NLIMITS)
		return (EINVAL);

	alimp = &p->p_rlimit[which];
	if (limp->rlim_cur > limp->rlim_max)
		return EINVAL;

	/* raising either value above the current hard limit requires root */
	if (limp->rlim_cur > alimp->rlim_max ||
	    limp->rlim_max > alimp->rlim_max)
		if ((error = suser(kauth_cred_get(), &p->p_acflag))) {
			return (error);
	}

	/* serialize limit changes and get a private plimit to modify */
	proc_limitblock(p);

	if ((error = proc_limitreplace(p)) != 0) {
		proc_limitunblock(p);
		return(error);
	}

	/* re-fetch: proc_limitreplace() may have installed a new plimit */
	alimp = &p->p_rlimit[which];

	switch (which) {

	case RLIMIT_CPU:
		if (limp->rlim_cur == RLIM_INFINITY) {
			/* no limit: stop the CPU-usage vtimer */
			task_vtimer_clear(p->task, TASK_VTIMER_RLIM);
			timerclear(&p->p_rlim_cpu);
		}
		else {
			task_absolutetime_info_data_t	tinfo;
			mach_msg_type_number_t			count;
			struct timeval					ttv, tv;
			clock_sec_t						tv_sec;
			clock_usec_t					tv_usec;

			/* compute CPU time already consumed (user + system) */
			count = TASK_ABSOLUTETIME_INFO_COUNT;
			task_info(p->task, TASK_ABSOLUTETIME_INFO,
							  	(task_info_t)&tinfo, &count);
			absolutetime_to_microtime(tinfo.total_user + tinfo.total_system,
									  &tv_sec, &tv_usec);
			ttv.tv_sec = tv_sec;
			ttv.tv_usec = tv_usec;

			/* remaining budget = new limit - time already used */
			tv.tv_sec = (limp->rlim_cur > __INT_MAX__ ? __INT_MAX__ : limp->rlim_cur);
			tv.tv_usec = 0;
			timersub(&tv, &ttv, &p->p_rlim_cpu);

			timerclear(&tv);
			if (timercmp(&p->p_rlim_cpu, &tv, >))
				task_vtimer_set(p->task, TASK_VTIMER_RLIM);
			else {
				/* limit is already exceeded: signal immediately */
				task_vtimer_clear(p->task, TASK_VTIMER_RLIM);

				timerclear(&p->p_rlim_cpu);

				psignal(p, SIGXCPU);
			}
		}
		break;

	case RLIMIT_DATA:
		/* silently clip to the system maximum data segment size */
		if (limp->rlim_cur > maxdmap)
			limp->rlim_cur = maxdmap;
		if (limp->rlim_max > maxdmap)
			limp->rlim_max = maxdmap;
		break;

	case RLIMIT_STACK:
		/* Disallow illegal stack size instead of clipping */
		if (limp->rlim_cur > maxsmap ||
		    limp->rlim_max > maxsmap) {
			if (posix) {
				error = EINVAL;
				goto out;
			}
			else {
				/*
				 * 4797860 - workaround poorly written installers by
				 * doing previous implementation (< 10.5) when caller
				 * is non-POSIX conforming.
				 */
				if (limp->rlim_cur > maxsmap)
					limp->rlim_cur = maxsmap;
				if (limp->rlim_max > maxsmap)
					limp->rlim_max = maxsmap;
			}
		}

		/*
		 * Stack is allocated to the max at exec time with only
		 * "rlim_cur" bytes accessible.  If stack limit is going
		 * up make more accessible, if going down make inaccessible.
		 */
		if (limp->rlim_cur > alimp->rlim_cur) {
			user_addr_t addr;
			user_size_t size;

				/* grow stack */
				size = round_page_64(limp->rlim_cur);
				size -= round_page_64(alimp->rlim_cur);

			/* make the newly-permitted range below the old limit accessible */
			addr = p->user_stack - round_page_64(limp->rlim_cur);
			kr = mach_vm_protect(current_map(),
					     addr, size,
					     FALSE, VM_PROT_DEFAULT);
			if (kr != KERN_SUCCESS) {
				error =  EINVAL;
				goto out;
			}
		} else if (limp->rlim_cur < alimp->rlim_cur) {
			user_addr_t addr;
			user_size_t size;
			user_addr_t cur_sp;

				/* shrink stack */

			/*
			 * First check if new stack limit would agree
			 * with current stack usage.
			 * Get the current thread's stack pointer...
			 */
			cur_sp = thread_adjuserstack(current_thread(),
						     0);
			if (cur_sp <= p->user_stack &&
			    cur_sp > (p->user_stack -
				      round_page_64(alimp->rlim_cur))) {
				/* stack pointer is in main stack */
				if (cur_sp <= (p->user_stack -
					       round_page_64(limp->rlim_cur))) {
					/*
					 * New limit would cause
					 * current usage to be invalid:
					 * reject new limit.
					 */
					error =  EINVAL;
					goto out;
				}
			} else {
				/* not on the main stack: reject */
				error =  EINVAL;
				goto out;
			}

			/* revoke access to the range between the old and new limits */
			size = round_page_64(alimp->rlim_cur);
			size -= round_page_64(limp->rlim_cur);

			addr = p->user_stack - round_page_64(alimp->rlim_cur);

			kr = mach_vm_protect(current_map(),
					     addr, size,
					     FALSE, VM_PROT_NONE);
			if (kr != KERN_SUCCESS) {
				error =  EINVAL;
				goto out;
			}
		} else {
			/* no change ... */
		}
		break;

	case RLIMIT_NOFILE:
		/*
		 * Only root can set the maxfiles limits, as it is
		 * systemwide resource.  If we are expecting POSIX behavior,
		 * instead of clamping the value, return EINVAL.  We do this
		 * because historically, people have been able to attempt to
		 * set RLIM_INFINITY to get "whatever the maximum is".
		*/
		if ( kauth_cred_issuser(kauth_cred_get()) ) {
			if (limp->rlim_cur != alimp->rlim_cur &&
			    limp->rlim_cur > (rlim_t)maxfiles) {
			    	if (posix) {
					error =  EINVAL;
					goto out;
				}
				limp->rlim_cur = maxfiles;
			}
			if (limp->rlim_max != alimp->rlim_max &&
			    limp->rlim_max > (rlim_t)maxfiles)
				limp->rlim_max = maxfiles;
		}
		else {
			if (limp->rlim_cur != alimp->rlim_cur &&
			    limp->rlim_cur > (rlim_t)maxfilesperproc) {
			    	if (posix) {
					error =  EINVAL;
					goto out;
				}
				limp->rlim_cur = maxfilesperproc;
			}
			if (limp->rlim_max != alimp->rlim_max &&
			    limp->rlim_max > (rlim_t)maxfilesperproc)
				limp->rlim_max = maxfilesperproc;
		}
		break;

	case RLIMIT_NPROC:
		/*
		 * Only root can set to the maxproc limits, as it is
		 * systemwide resource; all others are limited to
		 * maxprocperuid (presumably less than maxproc).
		 */
		if ( kauth_cred_issuser(kauth_cred_get()) ) {
			if (limp->rlim_cur > (rlim_t)maxproc)
				limp->rlim_cur = maxproc;
			if (limp->rlim_max > (rlim_t)maxproc)
				limp->rlim_max = maxproc;
		}
		else {
			if (limp->rlim_cur > (rlim_t)maxprocperuid)
				limp->rlim_cur = maxprocperuid;
			if (limp->rlim_max > (rlim_t)maxprocperuid)
				limp->rlim_max = maxprocperuid;
		}
		break;

	case RLIMIT_MEMLOCK:
		/*
		 * Tell the Mach VM layer about the new limit value.
		 */

		vm_map_set_user_wire_limit(current_map(), limp->rlim_cur);
		break;

	} /* switch... */
	/* commit the (possibly clamped) values under the proc lock */
	proc_lock(p);
	*alimp = *limp;
	proc_unlock(p);
	error = 0;
out:
	proc_limitunblock(p);
	return (error);
}
929
930/* ARGSUSED */
931int
932getrlimit(struct proc *p, struct getrlimit_args *uap, __unused int32_t *retval)
933{
934	struct rlimit lim;
935
936	/*
937	 * Take out flag now in case we need to use it to trigger variant
938	 * behaviour later.
939	 */
940	uap->which &= ~_RLIMIT_POSIX_FLAG;
941
942	if (uap->which >= RLIM_NLIMITS)
943		return (EINVAL);
944	proc_limitget(p, uap->which, &lim);
945	return (copyout((caddr_t)&lim,
946	    		uap->rlp, sizeof (struct rlimit)));
947}
948
949/*
950 * Transform the running time and tick information in proc p into user,
951 * system, and interrupt time usage.
952 */
953/* No lock on proc is held for this.. */
void
calcru(struct proc *p, struct timeval *up, struct timeval *sp, struct timeval *ip)
{
	task_t			task;

	/* start from zero; totals are accumulated below */
	timerclear(up);
	timerclear(sp);
	/* interrupt time is never accumulated here; caller just gets zero */
	if (ip != NULL)
		timerclear(ip);

	task = p->task;
	if (task) {
		mach_task_basic_info_data_t tinfo;
		task_thread_times_info_data_t ttimesinfo;
		task_events_info_data_t teventsinfo;
		mach_msg_type_number_t task_info_count, task_ttimes_count;
		mach_msg_type_number_t task_events_count;
		struct timeval ut,st;

		/*
		 * Two task_info() flavors are summed: MACH_TASK_BASIC_INFO
		 * times (per Mach docs, terminated threads) plus
		 * TASK_THREAD_TIMES_INFO times (live threads).
		 */
		task_info_count	= MACH_TASK_BASIC_INFO_COUNT;
		task_info(task, MACH_TASK_BASIC_INFO,
			  (task_info_t)&tinfo, &task_info_count);
		ut.tv_sec = tinfo.user_time.seconds;
		ut.tv_usec = tinfo.user_time.microseconds;
		st.tv_sec = tinfo.system_time.seconds;
		st.tv_usec = tinfo.system_time.microseconds;
		timeradd(&ut, up, up);
		timeradd(&st, sp, sp);

		task_ttimes_count = TASK_THREAD_TIMES_INFO_COUNT;
		task_info(task, TASK_THREAD_TIMES_INFO,
			  (task_info_t)&ttimesinfo, &task_ttimes_count);

		ut.tv_sec = ttimesinfo.user_time.seconds;
		ut.tv_usec = ttimesinfo.user_time.microseconds;
		st.tv_sec = ttimesinfo.system_time.seconds;
		st.tv_usec = ttimesinfo.system_time.microseconds;
		timeradd(&ut, up, up);
		timeradd(&st, sp, sp);

		/* side effect: refresh fault/context-switch stats in p_ru */
		task_events_count = TASK_EVENTS_INFO_COUNT;
		task_info(task, TASK_EVENTS_INFO,
			  (task_info_t)&teventsinfo, &task_events_count);

		/*
		 * No need to lock "p":  this does not need to be
		 * completely consistent, right ?
		 */
		p->p_stats->p_ru.ru_minflt = (teventsinfo.faults -
					      teventsinfo.pageins);
		p->p_stats->p_ru.ru_majflt = teventsinfo.pageins;
		/* involuntary switches = total switches - voluntary ones */
		p->p_stats->p_ru.ru_nivcsw = (teventsinfo.csw -
					      p->p_stats->p_ru.ru_nvcsw);
		if (p->p_stats->p_ru.ru_nivcsw < 0)
			p->p_stats->p_ru.ru_nivcsw = 0;

		p->p_stats->p_ru.ru_maxrss = tinfo.resident_size_max;
	}
}
1013
1014__private_extern__ void munge_user64_rusage(struct rusage *a_rusage_p, struct user64_rusage *a_user_rusage_p);
1015__private_extern__ void munge_user32_rusage(struct rusage *a_rusage_p, struct user32_rusage *a_user_rusage_p);
1016
1017/* ARGSUSED */
int
getrusage(struct proc *p, struct getrusage_args *uap, __unused int32_t *retval)
{
	struct rusage *rup, rubuf;
	struct user64_rusage rubuf64;
	struct user32_rusage rubuf32;
	size_t retsize = sizeof(rubuf);			/* default: 32 bits */
	caddr_t retbuf = (caddr_t)&rubuf;		/* default: 32 bits */
	struct timeval utime;
	struct timeval stime;


	switch (uap->who) {
	case RUSAGE_SELF:
		/* refresh utime/stime from the Mach task before snapshotting */
		calcru(p, &utime, &stime, NULL);
		proc_lock(p);
		rup = &p->p_stats->p_ru;
		rup->ru_utime = utime;
		rup->ru_stime = stime;

		rubuf = *rup;	/* snapshot taken under the proc lock */
		proc_unlock(p);

		break;

	case RUSAGE_CHILDREN:
		/* accumulated usage of reaped children; no recalculation */
		proc_lock(p);
		rup = &p->p_stats->p_cru;
		rubuf = *rup;
		proc_unlock(p);
		break;

	default:
		return (EINVAL);
	}
	/* marshal into the caller's ABI layout (64- vs 32-bit process) */
	if (IS_64BIT_PROCESS(p)) {
		retsize = sizeof(rubuf64);
		retbuf = (caddr_t)&rubuf64;
		munge_user64_rusage(&rubuf, &rubuf64);
	} else {
		retsize = sizeof(rubuf32);
		retbuf = (caddr_t)&rubuf32;
		munge_user32_rusage(&rubuf, &rubuf32);
	}

	return (copyout(retbuf, uap->rusage, retsize));
}
1065
1066void
1067ruadd(struct rusage *ru, struct rusage *ru2)
1068{
1069	long *ip, *ip2;
1070	long i;
1071
1072	timeradd(&ru->ru_utime, &ru2->ru_utime, &ru->ru_utime);
1073	timeradd(&ru->ru_stime, &ru2->ru_stime, &ru->ru_stime);
1074	if (ru->ru_maxrss < ru2->ru_maxrss)
1075		ru->ru_maxrss = ru2->ru_maxrss;
1076	ip = &ru->ru_first; ip2 = &ru2->ru_first;
1077	for (i = &ru->ru_last - &ru->ru_first; i >= 0; i--)
1078		*ip++ += *ip2++;
1079}
1080
1081/*
1082 * Add the rusage stats of child in parent.
1083 *
1084 * It adds rusage statistics of child process and statistics of all its
1085 * children to its parent.
1086 *
1087 * Note: proc lock of parent should be held while calling this function.
1088 */
void
update_rusage_info_child(struct rusage_info_child *ri, struct rusage_info_v2 *ri2)
{
	/*
	 * Each accumulator takes the child's own usage plus what the child
	 * had already accumulated from its own (grand)children.
	 */
	ri->ri_child_user_time += (ri2->ri_user_time +
					ri2->ri_child_user_time);
	ri->ri_child_system_time += (ri2->ri_system_time +
					ri2->ri_child_system_time);
	ri->ri_child_pkg_idle_wkups += (ri2->ri_pkg_idle_wkups +
					ri2->ri_child_pkg_idle_wkups);
	ri->ri_child_interrupt_wkups += (ri2->ri_interrupt_wkups +
					ri2->ri_child_interrupt_wkups);
	ri->ri_child_pageins += (ri2->ri_pageins +
					ri2->ri_child_pageins);
	/* the child's own lifetime is exit - start (abstime units) */
	ri->ri_child_elapsed_abstime += ((ri2->ri_proc_exit_abstime -
		ri2->ri_proc_start_abstime) + ri2->ri_child_elapsed_abstime);
}
1105
1106void
1107proc_limitget(proc_t p, int which, struct rlimit * limp)
1108{
1109	proc_list_lock();
1110	limp->rlim_cur = p->p_rlimit[which].rlim_cur;
1111	limp->rlim_max = p->p_rlimit[which].rlim_max;
1112	proc_list_unlock();
1113}
1114
1115
1116void
1117proc_limitdrop(proc_t p, int exiting)
1118{
1119	struct  plimit * freelim = NULL;
1120	struct  plimit * freeoldlim = NULL;
1121
1122	proc_list_lock();
1123
1124	if (--p->p_limit->pl_refcnt == 0) {
1125		freelim = p->p_limit;
1126		p->p_limit = NULL;
1127	}
1128	if ((exiting != 0) && (p->p_olimit != NULL) && (--p->p_olimit->pl_refcnt == 0)) {
1129		freeoldlim =  p->p_olimit;
1130		p->p_olimit = NULL;
1131	}
1132
1133	proc_list_unlock();
1134	if (freelim != NULL)
1135		FREE_ZONE(freelim, sizeof *p->p_limit, M_PLIMIT);
1136	if (freeoldlim != NULL)
1137		FREE_ZONE(freeoldlim, sizeof *p->p_olimit, M_PLIMIT);
1138}
1139
1140
1141void
1142proc_limitfork(proc_t parent, proc_t child)
1143{
1144	proc_list_lock();
1145	child->p_limit = parent->p_limit;
1146	child->p_limit->pl_refcnt++;
1147	child->p_olimit = NULL;
1148	proc_list_unlock();
1149}
1150
/*
 * Block until no other thread is changing this process' resource limits,
 * then claim the change token (P_LLIMCHANGE) for the caller.  Paired with
 * proc_limitunblock(); together they serialize proc_limitreplace().
 */
void
proc_limitblock(proc_t p)
{
	proc_lock(p);
	/* Wait out any in-progress limit change. */
	while (p->p_lflag & P_LLIMCHANGE) {
		p->p_lflag |= P_LLIMWAIT;
		/* Sleep on &p->p_olimit; proc_limitunblock() wakes this channel. */
		msleep(&p->p_olimit, &p->p_mlock, 0, "proc_limitblock", NULL);
	}
	/* Mark a limit change as now in progress. */
	p->p_lflag |= P_LLIMCHANGE;
	proc_unlock(p);

}
1163
1164
/*
 * Release the limit-change token taken by proc_limitblock() and wake any
 * threads waiting to take it.
 */
void
proc_limitunblock(proc_t p)
{
	proc_lock(p);
	p->p_lflag &= ~P_LLIMCHANGE;
	/* Wake sleepers parked in proc_limitblock() on &p->p_olimit. */
	if (p->p_lflag & P_LLIMWAIT) {
		p->p_lflag &= ~P_LLIMWAIT;
		wakeup(&p->p_olimit);
	}
	proc_unlock(p);
}
1176
/* This is called behind the serialization provided by proc_limitblock/unblock */
int
proc_limitreplace(proc_t p)
{
	struct plimit *copy;


	proc_list_lock();

	/* Sole reference: the limit is already private, nothing to unshare. */
	if (p->p_limit->pl_refcnt == 1) {
		proc_list_unlock();
		return(0);
	}

	proc_list_unlock();

	/* Drop the list lock across the (potentially blocking) allocation. */
	MALLOC_ZONE(copy, struct plimit *,
			sizeof(struct plimit), M_PLIMIT, M_WAITOK);
	if (copy == NULL) {
		return(ENOMEM);
	}

	/* Retake the lock and swap in the private copy of the limits. */
	proc_list_lock();
	bcopy(p->p_limit->pl_rlimit, copy->pl_rlimit,
	    sizeof(struct rlimit) * RLIM_NLIMITS);
	copy->pl_refcnt = 1;
	/* hang on to reference to old till process exits */
	p->p_olimit = p->p_limit;
	p->p_limit = copy;
	proc_list_unlock();

	return(0);
}
1210
1211/*
1212 * iopolicysys
1213 *
1214 * Description:	System call MUX for use in manipulating I/O policy attributes of the current process or thread
1215 *
1216 * Parameters:	cmd				Policy command
1217 *		arg				Pointer to policy arguments
1218 *
1219 * Returns:	0				Success
1220 *		EINVAL				Invalid command or invalid policy arguments
1221 *
1222 */
1223
1224static int
1225iopolicysys_disk(struct proc *p, int cmd, int scope, int policy, struct _iopol_param_t *iop_param);
1226static int
1227iopolicysys_vfs(struct proc *p, int cmd, int scope, int policy, struct _iopol_param_t *iop_param);
1228
1229int
1230iopolicysys(struct proc *p, struct iopolicysys_args *uap, __unused int32_t *retval)
1231{
1232	int     error = 0;
1233	struct _iopol_param_t iop_param;
1234
1235	if ((error = copyin(uap->arg, &iop_param, sizeof(iop_param))) != 0)
1236		goto out;
1237
1238	switch (iop_param.iop_iotype) {
1239		case IOPOL_TYPE_DISK:
1240			error = iopolicysys_disk(p, uap->cmd, iop_param.iop_scope, iop_param.iop_policy, &iop_param);
1241			if (error)
1242				goto out;
1243			break;
1244		case IOPOL_TYPE_VFS_HFS_CASE_SENSITIVITY:
1245			error = iopolicysys_vfs(p, uap->cmd, iop_param.iop_scope, iop_param.iop_policy, &iop_param);
1246			if (error)
1247				goto out;
1248			break;
1249		default:
1250			error = EINVAL;
1251			goto out;
1252	}
1253
1254	/* Individual iotype handlers are expected to update iop_param, if requested with a GET command */
1255	if (uap->cmd == IOPOL_CMD_GET) {
1256		error = copyout((caddr_t)&iop_param, uap->arg, sizeof(iop_param));
1257		if (error)
1258			goto out;
1259	}
1260
1261out:
1262	return (error);
1263}
1264
/*
 * iopolicysys_disk
 *
 * Handle IOPOL_TYPE_DISK commands: validate the scope and (for SET) the
 * policy value, then get/set the corresponding task policy on the current
 * task or thread.
 *
 * Returns: 0 on success, EINVAL for a bad scope, policy, or command.
 */
static int
iopolicysys_disk(struct proc *p __unused, int cmd, int scope, int policy, struct _iopol_param_t *iop_param)
{
	int			error = 0;
	thread_t	thread;
	int			policy_flavor;

	/* Validate scope */
	switch (scope) {
		case IOPOL_SCOPE_PROCESS:
			/* Whole-task I/O policy. */
			thread = THREAD_NULL;
			policy_flavor = TASK_POLICY_IOPOL;
			break;

		case IOPOL_SCOPE_THREAD:
			/* Per-thread I/O policy for the calling thread. */
			thread = current_thread();
			policy_flavor = TASK_POLICY_IOPOL;
			break;

		case IOPOL_SCOPE_DARWIN_BG:
			/* Policy applied while the task is darwin-backgrounded. */
			thread = THREAD_NULL;
			policy_flavor = TASK_POLICY_DARWIN_BG_IOPOL;
			break;

		default:
			error = EINVAL;
			goto out;
	}

	/* Validate policy (SET only; GET accepts any incoming value) */
	if (cmd == IOPOL_CMD_SET) {
		switch (policy) {
			case IOPOL_DEFAULT:
				/* Map DEFAULT to the scope's concrete level. */
				if (scope == IOPOL_SCOPE_DARWIN_BG) {
					/* the current default BG throttle level is UTILITY */
					policy = IOPOL_UTILITY;
				} else {
					policy = IOPOL_IMPORTANT;
				}
				break;
			case IOPOL_UTILITY:
				/* fall-through */
			case IOPOL_THROTTLE:
				/* These levels are OK */
				break;
			case IOPOL_IMPORTANT:
				/* fall-through */
			case IOPOL_STANDARD:
				/* fall-through */
			case IOPOL_PASSIVE:
				if (scope == IOPOL_SCOPE_DARWIN_BG) {
					/* These levels are invalid for BG */
					error = EINVAL;
					goto out;
				} else {
					/* OK for other scopes */
				}
				break;
			default:
				error = EINVAL;
				goto out;
		}
	}

	/* Perform command */
	switch(cmd) {
		case IOPOL_CMD_SET:
			proc_set_task_policy(current_task(), thread,
								 TASK_POLICY_INTERNAL, policy_flavor,
								 policy);
			break;
		case IOPOL_CMD_GET:
			/* Report the current policy back through iop_param. */
			policy = proc_get_task_policy(current_task(), thread,
										  TASK_POLICY_INTERNAL, policy_flavor);

			iop_param->iop_policy = policy;
			break;
		default:
			error = EINVAL; /* unknown command */
			break;
	}

out:
	return (error);
}
1350
/*
 * iopolicysys_vfs
 *
 * Handle IOPOL_TYPE_VFS_HFS_CASE_SENSITIVITY commands: toggle or report
 * the per-process "force HFS case sensitivity" bit.  Only the process
 * scope is supported, and SET requires superuser credentials.
 *
 * Returns: 0 on success, EINVAL for a bad scope/policy/command,
 * EPERM if a non-root caller attempts a SET.
 */
static int
iopolicysys_vfs(struct proc *p, int cmd, int scope, int policy, struct _iopol_param_t *iop_param)
{
	int			error = 0;

	/* Validate scope */
	switch (scope) {
		case IOPOL_SCOPE_PROCESS:
			/* Only process OK */
			break;
		default:
			error = EINVAL;
			goto out;
	}

	/* Validate policy */
	if (cmd == IOPOL_CMD_SET) {
		switch (policy) {
			case IOPOL_VFS_HFS_CASE_SENSITIVITY_DEFAULT:
				/* fall-through */
			case IOPOL_VFS_HFS_CASE_SENSITIVITY_FORCE_CASE_SENSITIVE:
				/* These policies are OK */
				break;
			default:
				error = EINVAL;
				goto out;
		}
	}

	/* Perform command */
	switch(cmd) {
		case IOPOL_CMD_SET:
			/* Changing case-sensitivity policy is a privileged operation. */
			if (0 == kauth_cred_issuser(kauth_cred_get())) {
				error = EPERM;
				goto out;
			}

			/* Atomically set or clear the policy bit in p_vfs_iopolicy. */
			switch (policy) {
				case IOPOL_VFS_HFS_CASE_SENSITIVITY_DEFAULT:
					OSBitAndAtomic16(~((uint32_t)P_VFS_IOPOLICY_FORCE_HFS_CASE_SENSITIVITY), &p->p_vfs_iopolicy);
					break;
				case IOPOL_VFS_HFS_CASE_SENSITIVITY_FORCE_CASE_SENSITIVE:
					OSBitOrAtomic16((uint32_t)P_VFS_IOPOLICY_FORCE_HFS_CASE_SENSITIVITY, &p->p_vfs_iopolicy);
					break;
				default:
					error = EINVAL;
					goto out;
			}

			break;
		case IOPOL_CMD_GET:
			/* Report the policy implied by the current bit state. */
			iop_param->iop_policy = (p->p_vfs_iopolicy & P_VFS_IOPOLICY_FORCE_HFS_CASE_SENSITIVITY)
				? IOPOL_VFS_HFS_CASE_SENSITIVITY_FORCE_CASE_SENSITIVE
				: IOPOL_VFS_HFS_CASE_SENSITIVITY_DEFAULT;
			break;
		default:
			error = EINVAL; /* unknown command */
			break;
	}

out:
	return (error);
}
1414
1415/* BSD call back function for task_policy */
1416void proc_apply_task_networkbg(void * bsd_info, thread_t thread, int bg);
1417
1418void
1419proc_apply_task_networkbg(void * bsd_info, thread_t thread, int bg)
1420{
1421	proc_t p = PROC_NULL;
1422	proc_t curp = (proc_t)bsd_info;
1423	pid_t pid;
1424	int prio = (bg ? PRIO_DARWIN_BG : 0);
1425
1426	pid = curp->p_pid;
1427	p = proc_find(pid);
1428	if (p != PROC_NULL) {
1429		do_background_socket(p, thread, prio);
1430		proc_rele(p);
1431	}
1432}
1433
/*
 * Collect rusage_info statistics for proc p into *ru, filling only the
 * fields that the requested flavor requires.  The switch cases cascade
 * deliberately: V2 adds disk-I/O bytes on top of V1, which adds child
 * statistics on top of V0.
 */
void
gather_rusage_info_v2(proc_t p, struct rusage_info_v2 *ru, int flavor)
{
	struct rusage_info_child *ri_child;

	assert(p->p_stats != NULL);
	switch(flavor) {
	case RUSAGE_INFO_V2:
		ru->ri_diskio_bytesread = p->p_stats->ri_diskiobytes.ri_bytesread;
		ru->ri_diskio_byteswritten = p->p_stats->ri_diskiobytes.ri_byteswritten;
		/* fall through */

	case RUSAGE_INFO_V1:
		/*
		 * p->p_stats->ri_child statistics are protected under proc lock.
		 */
		proc_lock(p);

		ri_child = &(p->p_stats->ri_child);
		ru->ri_child_user_time = ri_child->ri_child_user_time;
		ru->ri_child_system_time = ri_child->ri_child_system_time;
		ru->ri_child_pkg_idle_wkups = ri_child->ri_child_pkg_idle_wkups;
		ru->ri_child_interrupt_wkups = ri_child->ri_child_interrupt_wkups;
		ru->ri_child_pageins = ri_child->ri_child_pageins;
		ru->ri_child_elapsed_abstime = ri_child->ri_child_elapsed_abstime;

		proc_unlock(p);
		/* fall through */

	case RUSAGE_INFO_V0:
		/* Base stats: executable UUID, task counters, start time. */
		proc_getexecutableuuid(p, (unsigned char *)&ru->ri_uuid, sizeof (ru->ri_uuid));
		fill_task_rusage_v2(p->task, ru);
		ru->ri_proc_start_abstime = p->p_stats->ps_start;
	}
}
1469
1470/*
1471 * Temporary function to copy value from rusage_info_v2 to rusage_info_v0.
1472 */
1473static void
1474rusage_info_v2_to_v0(struct rusage_info_v0 *ri_v0, struct rusage_info_v2 *ri_v2)
1475{
1476	memcpy(&ri_v0->ri_uuid[0], &ri_v2->ri_uuid[0], sizeof(ri_v0->ri_uuid));
1477	ri_v0->ri_user_time = ri_v2->ri_user_time;
1478	ri_v0->ri_system_time = ri_v2->ri_system_time;
1479	ri_v0->ri_pkg_idle_wkups = ri_v2->ri_pkg_idle_wkups;
1480	ri_v0->ri_interrupt_wkups = ri_v2->ri_interrupt_wkups;
1481	ri_v0->ri_pageins = ri_v2->ri_pageins;
1482	ri_v0->ri_wired_size = ri_v2->ri_wired_size;
1483	ri_v0->ri_resident_size = ri_v2->ri_resident_size;
1484	ri_v0->ri_phys_footprint = ri_v2->ri_phys_footprint;
1485	ri_v0->ri_proc_start_abstime = ri_v2->ri_proc_start_abstime;
1486	ri_v0->ri_proc_exit_abstime = ri_v2->ri_proc_exit_abstime;
1487}
1488
1489static void
1490rusage_info_v2_to_v1(struct rusage_info_v1 *ri_v1, struct rusage_info_v2 *ri_v2)
1491{
1492	memcpy(&ri_v1->ri_uuid[0], &ri_v2->ri_uuid[0], sizeof(ri_v1->ri_uuid));
1493	ri_v1->ri_user_time = ri_v2->ri_user_time;
1494	ri_v1->ri_system_time = ri_v2->ri_system_time;
1495	ri_v1->ri_pkg_idle_wkups = ri_v2->ri_pkg_idle_wkups;
1496	ri_v1->ri_interrupt_wkups = ri_v2->ri_interrupt_wkups;
1497	ri_v1->ri_pageins = ri_v2->ri_pageins;
1498	ri_v1->ri_wired_size = ri_v2->ri_wired_size;
1499	ri_v1->ri_resident_size = ri_v2->ri_resident_size;
1500	ri_v1->ri_phys_footprint = ri_v2->ri_phys_footprint;
1501	ri_v1->ri_proc_start_abstime = ri_v2->ri_proc_start_abstime;
1502	ri_v1->ri_proc_exit_abstime = ri_v2->ri_proc_exit_abstime;
1503	ri_v1->ri_child_user_time = ri_v2->ri_child_user_time;
1504	ri_v1->ri_child_system_time = ri_v2->ri_child_system_time;
1505	ri_v1->ri_child_pkg_idle_wkups = ri_v2->ri_child_pkg_idle_wkups;
1506	ri_v1->ri_child_interrupt_wkups = ri_v2->ri_child_interrupt_wkups;
1507	ri_v1->ri_child_pageins = ri_v2->ri_child_pageins;
1508	ri_v1->ri_child_elapsed_abstime = ri_v2->ri_child_elapsed_abstime;
1509}
1510
1511int
1512proc_get_rusage(proc_t p, int flavor, user_addr_t buffer, __unused int is_zombie)
1513{
1514	struct rusage_info_v0 ri_v0;
1515	struct rusage_info_v1 ri_v1;
1516	struct rusage_info_v2 ri_v2;
1517
1518	int error = 0;
1519
1520	switch (flavor) {
1521	case RUSAGE_INFO_V0:
1522		/*
1523		 * If task is still alive, collect info from the live task itself.
1524		 * Otherwise, look to the cached info in the zombie proc.
1525		 */
1526		if (p->p_ru == NULL) {
1527			gather_rusage_info_v2(p, &ri_v2, flavor);
1528			ri_v2.ri_proc_exit_abstime = 0;
1529			rusage_info_v2_to_v0(&ri_v0, &ri_v2);
1530		} else {
1531			rusage_info_v2_to_v0(&ri_v0, &p->p_ru->ri);
1532		}
1533		error = copyout(&ri_v0, buffer, sizeof (ri_v0));
1534		break;
1535
1536	case RUSAGE_INFO_V1:
1537		/*
1538		 * If task is still alive, collect info from the live task itself.
1539		 * Otherwise, look to the cached info in the zombie proc.
1540		 */
1541		if (p->p_ru == NULL) {
1542			gather_rusage_info_v2(p, &ri_v2, flavor);
1543			ri_v2.ri_proc_exit_abstime = 0;
1544			rusage_info_v2_to_v1(&ri_v1, &ri_v2);
1545		} else {
1546			rusage_info_v2_to_v1(&ri_v1, &p->p_ru->ri);
1547		}
1548		error = copyout(&ri_v1, buffer, sizeof (ri_v1));
1549		break;
1550
1551	case RUSAGE_INFO_V2:
1552		/*
1553		 * If task is still alive, collect info from the live task itself.
1554		 * Otherwise, look to the cached info in the zombie proc.
1555		 */
1556		if (p->p_ru == NULL) {
1557			gather_rusage_info_v2(p, &ri_v2, flavor);
1558			ri_v2.ri_proc_exit_abstime = 0;
1559		} else {
1560			ri_v2 = p->p_ru->ri;
1561		}
1562		error = copyout(&ri_v2, buffer, sizeof (ri_v2));
1563		break;
1564
1565	default:
1566		error = EINVAL;
1567		break;
1568	}
1569
1570	return (error);
1571}
1572
1573static int
1574mach_to_bsd_rv(int mach_rv)
1575{
1576	int bsd_rv = 0;
1577
1578	switch (mach_rv) {
1579	case KERN_SUCCESS:
1580		bsd_rv = 0;
1581		break;
1582	case KERN_INVALID_ARGUMENT:
1583		bsd_rv = EINVAL;
1584		break;
1585	default:
1586		panic("unknown error %#x", mach_rv);
1587	}
1588
1589	return bsd_rv;
1590}
1591
1592/*
1593 * Resource limit controls
1594 *
1595 * uap->flavor available flavors:
1596 *
1597 *     RLIMIT_WAKEUPS_MONITOR
1598 */
int
proc_rlimit_control(__unused struct proc *p, struct proc_rlimit_control_args *uap, int32_t *retval)
{
	proc_t	targetp;
	int 	error = 0;
	struct	proc_rlimit_control_wakeupmon wakeupmon_args;
	uint32_t cpumon_flags;
	kauth_cred_t my_cred, target_cred;

	*retval = 0;

	/* Take a ref on the target process; dropped before return. */
	if ((targetp = proc_find(uap->pid)) == PROC_NULL) {
		*retval = -1;
		return (ESRCH);
	}

	my_cred = kauth_cred_get();
	target_cred = kauth_cred_proc_ref(targetp);

	/*
	 * Allow superuser, a caller with real uid 0, or a caller whose
	 * effective/real uid matches the target's effective uid.
	 */
	if (!kauth_cred_issuser(my_cred) && kauth_cred_getruid(my_cred) &&
	    kauth_cred_getuid(my_cred) != kauth_cred_getuid(target_cred) &&
	    kauth_cred_getruid(my_cred) != kauth_cred_getuid(target_cred)) {
		/* Release both references taken above before bailing. */
		proc_rele(targetp);
		kauth_cred_unref(&target_cred);
		*retval = -1;
		error = EACCES;
		return (error);
	}

	switch (uap->flavor) {
	case RLIMIT_WAKEUPS_MONITOR:
		/* In/out struct: read settings, apply, then report results back. */
		if ((error = copyin(uap->arg, &wakeupmon_args, sizeof (wakeupmon_args))) != 0) {
			break;
		}
		if ((error = mach_to_bsd_rv(task_wakeups_monitor_ctl(targetp->task, &wakeupmon_args.wm_flags,
		     &wakeupmon_args.wm_rate))) != 0) {
			break;
		}
		error = copyout(&wakeupmon_args, uap->arg, sizeof (wakeupmon_args));
		break;
	case RLIMIT_CPU_USAGE_MONITOR:
		cpumon_flags = uap->arg; // XXX temporarily stashing flags in argp (12592127)
		error = mach_to_bsd_rv(task_cpu_usage_monitor_ctl(targetp->task, &cpumon_flags));
		break;
	default:
		error = EINVAL;
		break;
	}

	proc_rele(targetp);
	kauth_cred_unref(&target_cred);

	if (error != 0) {
		*retval = -1;
	}

	/*
	 * Return value from this function becomes errno to userland caller.
	 * *retval is what the system call invocation returns.
	 */
	return (error);
}
1661