1/*
2 * Copyright (c) 2000-2011 Apple Inc. All rights reserved.
3 *
4 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
5 *
6 * This file contains Original Code and/or Modifications of Original Code
7 * as defined in and that are subject to the Apple Public Source License
8 * Version 2.0 (the 'License'). You may not use this file except in
9 * compliance with the License. The rights granted to you under the License
10 * may not be used to create, or enable the creation or redistribution of,
11 * unlawful or unlicensed copies of an Apple operating system, or to
12 * circumvent, violate, or enable the circumvention or violation of, any
13 * terms of an Apple operating system software license agreement.
14 *
15 * Please obtain a copy of the License at
16 * http://www.opensource.apple.com/apsl/ and read it before using this file.
17 *
18 * The Original Code and all software distributed under the License are
19 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23 * Please see the License for the specific language governing rights and
24 * limitations under the License.
25 *
26 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27 */
28/* Copyright (c) 1995 NeXT Computer, Inc. All Rights Reserved */
29/*-
30 * Copyright (c) 1982, 1986, 1989, 1993
31 *	The Regents of the University of California.  All rights reserved.
32 *
33 * This code is derived from software contributed to Berkeley by
34 * Mike Karels at Berkeley Software Design, Inc.
35 *
36 * Redistribution and use in source and binary forms, with or without
37 * modification, are permitted provided that the following conditions
38 * are met:
39 * 1. Redistributions of source code must retain the above copyright
40 *    notice, this list of conditions and the following disclaimer.
41 * 2. Redistributions in binary form must reproduce the above copyright
42 *    notice, this list of conditions and the following disclaimer in the
43 *    documentation and/or other materials provided with the distribution.
44 * 3. All advertising materials mentioning features or use of this software
45 *    must display the following acknowledgement:
46 *	This product includes software developed by the University of
47 *	California, Berkeley and its contributors.
48 * 4. Neither the name of the University nor the names of its contributors
49 *    may be used to endorse or promote products derived from this software
50 *    without specific prior written permission.
51 *
52 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
53 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
54 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
55 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
56 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
57 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
58 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
59 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
60 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
61 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
62 * SUCH DAMAGE.
63 *
64 *	@(#)kern_sysctl.c	8.4 (Berkeley) 4/14/94
65 */
66/*
67 * NOTICE: This file was modified by SPARTA, Inc. in 2005 to introduce
68 * support for mandatory and extensible security protections.  This notice
69 * is included in support of clause 2.2 (b) of the Apple Public License,
70 * Version 2.0.
71 */
72
73/*
 * DEPRECATED sysctl system call code
75 *
76 * Everything in this file is deprecated. Sysctls should be handled
77 * by the code in kern_newsysctl.c.
78 * The remaining "case" sections are supposed to be converted into
79 * SYSCTL_*-style definitions, and as soon as all of them are gone,
80 * this source file is supposed to die.
81 *
82 * DO NOT ADD ANY MORE "case" SECTIONS TO THIS FILE, instead define
83 * your sysctl with SYSCTL_INT, SYSCTL_PROC etc. in your source file.
84 */
85
86#include <sys/param.h>
87#include <sys/systm.h>
88#include <sys/kernel.h>
89#include <sys/malloc.h>
90#include <sys/proc_internal.h>
91#include <sys/kauth.h>
92#include <sys/file_internal.h>
93#include <sys/vnode_internal.h>
94#include <sys/unistd.h>
95#include <sys/buf.h>
96#include <sys/ioctl.h>
97#include <sys/namei.h>
98#include <sys/tty.h>
99#include <sys/disklabel.h>
100#include <sys/vm.h>
101#include <sys/sysctl.h>
102#include <sys/user.h>
103#include <sys/aio_kern.h>
104#include <sys/reboot.h>
105
106#include <security/audit/audit.h>
107#include <kern/kalloc.h>
108
109#include <mach/machine.h>
110#include <mach/mach_host.h>
111#include <mach/mach_types.h>
112#include <mach/vm_param.h>
113#include <kern/mach_param.h>
114#include <kern/task.h>
115#include <kern/thread.h>
116#include <kern/processor.h>
117#include <kern/debug.h>
118#include <vm/vm_kern.h>
119#include <vm/vm_map.h>
120#include <mach/host_info.h>
121
122#include <sys/mount_internal.h>
123#include <sys/kdebug.h>
124
125#include <IOKit/IOPlatformExpert.h>
126#include <pexpert/pexpert.h>
127
128#include <machine/machine_routines.h>
129#include <machine/exec.h>
130
131#include <vm/vm_protos.h>
132#include <vm/vm_pageout.h>
133#include <sys/imgsrc.h>
134#include <kern/timer_call.h>
135
136#if defined(__i386__) || defined(__x86_64__)
137#include <i386/cpuid.h>
138#endif
139
140#if CONFIG_FREEZE
141#include <sys/kern_memorystatus.h>
142#endif
143
144#if KPERF
145#include <kperf/kperf.h>
146#endif
147
148#if HYPERVISOR
149#include <kern/hv_support.h>
150#endif
151
152/*
153 * deliberately setting max requests to really high number
154 * so that runaway settings do not cause MALLOC overflows
155 */
156#define AIO_MAX_REQUESTS (128 * CONFIG_AIO_MAX)
157
158extern int aio_max_requests;
159extern int aio_max_requests_per_process;
160extern int aio_worker_threads;
161extern int lowpri_IO_window_msecs;
162extern int lowpri_IO_delay_msecs;
163extern int nx_enabled;
164extern int speculative_reads_disabled;
165extern int ignore_is_ssd;
166extern unsigned int speculative_prefetch_max;
167extern unsigned int speculative_prefetch_max_iosize;
168extern unsigned int preheat_max_bytes;
169extern unsigned int preheat_min_bytes;
170extern long numvnodes;
171
172extern uuid_string_t bootsessionuuid_string;
173
174extern unsigned int vm_max_delayed_work_limit;
175extern unsigned int vm_max_batch;
176
177extern unsigned int vm_page_free_min;
178extern unsigned int vm_page_free_target;
179extern unsigned int vm_page_free_reserved;
180extern unsigned int vm_page_speculative_percentage;
181extern unsigned int vm_page_speculative_q_age_ms;
182
183/*
184 * Conditionally allow dtrace to see these functions for debugging purposes.
185 */
186#ifdef STATIC
187#undef STATIC
188#endif
189#if 0
190#define STATIC
191#else
192#define STATIC static
193#endif
194
195extern boolean_t    mach_timer_coalescing_enabled;
196
197extern uint64_t timer_deadline_tracking_bin_1, timer_deadline_tracking_bin_2;
198
199STATIC void
200fill_user32_eproc(proc_t, struct user32_eproc *__restrict);
201STATIC void
202fill_user32_externproc(proc_t, struct user32_extern_proc *__restrict);
203STATIC void
204fill_user64_eproc(proc_t, struct user64_eproc *__restrict);
205STATIC void
206fill_user64_proc(proc_t, struct user64_kinfo_proc *__restrict);
207STATIC void
208fill_user64_externproc(proc_t, struct user64_extern_proc *__restrict);
209STATIC void
210fill_user32_proc(proc_t, struct user32_kinfo_proc *__restrict);
211
212extern int
213kdbg_control(int *name, u_int namelen, user_addr_t where, size_t * sizep);
214#if NFSCLIENT
215extern int
216netboot_root(void);
217#endif
218int
219pcsamples_ops(int *name, u_int namelen, user_addr_t where, size_t *sizep,
220              proc_t p);
221__private_extern__ kern_return_t
222reset_vmobjectcache(unsigned int val1, unsigned int val2);
223int
224sysctl_procargs(int *name, u_int namelen, user_addr_t where,
225				size_t *sizep, proc_t cur_proc);
226STATIC int
227sysctl_procargsx(int *name, u_int namelen, user_addr_t where, size_t *sizep,
228                 proc_t cur_proc, int argc_yes);
229int
230sysctl_struct(user_addr_t oldp, size_t *oldlenp, user_addr_t newp,
231              size_t newlen, void *sp, int len);
232
233STATIC int sysdoproc_filt_KERN_PROC_PID(proc_t p, void * arg);
234STATIC int sysdoproc_filt_KERN_PROC_PGRP(proc_t p, void * arg);
235STATIC int sysdoproc_filt_KERN_PROC_TTY(proc_t p, void * arg);
236STATIC int  sysdoproc_filt_KERN_PROC_UID(proc_t p, void * arg);
237STATIC int  sysdoproc_filt_KERN_PROC_RUID(proc_t p, void * arg);
238#if CONFIG_LCTX
239STATIC int  sysdoproc_filt_KERN_PROC_LCID(proc_t p, void * arg);
240#endif
241int sysdoproc_callback(proc_t p, void *arg);
242
243
244/* forward declarations for non-static STATIC */
245STATIC void fill_loadavg64(struct loadavg *la, struct user64_loadavg *la64);
246STATIC void fill_loadavg32(struct loadavg *la, struct user32_loadavg *la32);
247STATIC int sysctl_handle_kern_threadname(struct sysctl_oid *oidp, void *arg1, int arg2, struct sysctl_req *req);
248STATIC int sysctl_sched_stats(struct sysctl_oid *oidp, void *arg1, int arg2, struct sysctl_req *req);
249STATIC int sysctl_sched_stats_enable(struct sysctl_oid *oidp, void *arg1, int arg2, struct sysctl_req *req);
250STATIC int sysctl_kdebug_ops SYSCTL_HANDLER_ARGS;
251#if COUNT_SYSCALLS
252STATIC int sysctl_docountsyscalls SYSCTL_HANDLER_ARGS;
253#endif	/* COUNT_SYSCALLS */
254STATIC int sysctl_doprocargs SYSCTL_HANDLER_ARGS;
255STATIC int sysctl_doprocargs2 SYSCTL_HANDLER_ARGS;
256STATIC int sysctl_prochandle SYSCTL_HANDLER_ARGS;
257STATIC int sysctl_aiomax(struct sysctl_oid *oidp, void *arg1, int arg2, struct sysctl_req *req);
258STATIC int sysctl_aioprocmax(struct sysctl_oid *oidp, void *arg1, int arg2, struct sysctl_req *req);
259STATIC int sysctl_aiothreads(struct sysctl_oid *oidp, void *arg1, int arg2, struct sysctl_req *req);
260STATIC int sysctl_maxproc(struct sysctl_oid *oidp, void *arg1, int arg2, struct sysctl_req *req);
261STATIC int sysctl_osversion(struct sysctl_oid *oidp, void *arg1, int arg2, struct sysctl_req *req);
262STATIC int sysctl_sysctl_bootargs(struct sysctl_oid *oidp, void *arg1, int arg2, struct sysctl_req *req);
263STATIC int sysctl_maxvnodes(struct sysctl_oid *oidp, void *arg1, int arg2, struct sysctl_req *req);
264STATIC int sysctl_securelvl(struct sysctl_oid *oidp, void *arg1, int arg2, struct sysctl_req *req);
265STATIC int sysctl_domainname(struct sysctl_oid *oidp, void *arg1, int arg2, struct sysctl_req *req);
266STATIC int sysctl_hostname(struct sysctl_oid *oidp, void *arg1, int arg2, struct sysctl_req *req);
267STATIC int sysctl_procname(struct sysctl_oid *oidp, void *arg1, int arg2, struct sysctl_req *req);
268STATIC int sysctl_boottime(struct sysctl_oid *oidp, void *arg1, int arg2, struct sysctl_req *req);
269STATIC int sysctl_symfile(struct sysctl_oid *oidp, void *arg1, int arg2, struct sysctl_req *req);
270#if NFSCLIENT
271STATIC int sysctl_netboot(struct sysctl_oid *oidp, void *arg1, int arg2, struct sysctl_req *req);
272#endif
273#ifdef CONFIG_IMGSRC_ACCESS
274STATIC int sysctl_imgsrcdev(struct sysctl_oid *oidp, void *arg1, int arg2, struct sysctl_req *req);
275#endif
276STATIC int sysctl_usrstack(struct sysctl_oid *oidp, void *arg1, int arg2, struct sysctl_req *req);
277STATIC int sysctl_usrstack64(struct sysctl_oid *oidp, void *arg1, int arg2, struct sysctl_req *req);
278STATIC int sysctl_coredump(struct sysctl_oid *oidp, void *arg1, int arg2, struct sysctl_req *req);
279STATIC int sysctl_suid_coredump(struct sysctl_oid *oidp, void *arg1, int arg2, struct sysctl_req *req);
280STATIC int sysctl_delayterm(struct sysctl_oid *oidp, void *arg1, int arg2, struct sysctl_req *req);
281STATIC int sysctl_rage_vnode(struct sysctl_oid *oidp, void *arg1, int arg2, struct sysctl_req *req);
282STATIC int sysctl_kern_check_openevt(struct sysctl_oid *oidp, void *arg1, int arg2, struct sysctl_req *req);
283STATIC int sysctl_nx(struct sysctl_oid *oidp, void *arg1, int arg2, struct sysctl_req *req);
284STATIC int sysctl_loadavg(struct sysctl_oid *oidp, void *arg1, int arg2, struct sysctl_req *req);
285STATIC int sysctl_vm_toggle_address_reuse(struct sysctl_oid *oidp, void *arg1, int arg2, struct sysctl_req *req);
286STATIC int sysctl_swapusage(struct sysctl_oid *oidp, void *arg1, int arg2, struct sysctl_req *req);
287STATIC int fetch_process_cputype( proc_t cur_proc, int *name, u_int namelen, cpu_type_t *cputype);
288STATIC int sysctl_sysctl_native(struct sysctl_oid *oidp, void *arg1, int arg2, struct sysctl_req *req);
289STATIC int sysctl_sysctl_cputype(struct sysctl_oid *oidp, void *arg1, int arg2, struct sysctl_req *req);
290STATIC int sysctl_safeboot(struct sysctl_oid *oidp, void *arg1, int arg2, struct sysctl_req *req);
291STATIC int sysctl_singleuser(struct sysctl_oid *oidp, void *arg1, int arg2, struct sysctl_req *req);
292STATIC int sysctl_slide(struct sysctl_oid *oidp, void *arg1, int arg2, struct sysctl_req *req);
293
294
295extern void IORegistrySetOSBuildVersion(char * build_version);
296
297STATIC void
298fill_loadavg64(struct loadavg *la, struct user64_loadavg *la64)
299{
300	la64->ldavg[0]	= la->ldavg[0];
301	la64->ldavg[1]	= la->ldavg[1];
302	la64->ldavg[2]	= la->ldavg[2];
303	la64->fscale	= (user64_long_t)la->fscale;
304}
305
306STATIC void
307fill_loadavg32(struct loadavg *la, struct user32_loadavg *la32)
308{
309	la32->ldavg[0]	= la->ldavg[0];
310	la32->ldavg[1]	= la->ldavg[1];
311	la32->ldavg[2]	= la->ldavg[2];
312	la32->fscale	= (user32_long_t)la->fscale;
313}
314
315/*
316 * Attributes stored in the kernel.
317 */
318extern char corefilename[MAXPATHLEN+1];
319extern int do_coredump;
320extern int sugid_coredump;
321
322#if COUNT_SYSCALLS
323extern int do_count_syscalls;
324#endif
325
326#ifdef INSECURE
327int securelevel = -1;
328#else
329int securelevel;
330#endif
331
/*
 * kern.threadname: get or set the name of the current thread.
 *
 * Reading copies out the thread's name WITHOUT a NUL terminator
 * (length semantics match strlen); if the thread has no name yet the
 * reported length is MAXTHREADNAMESIZE - 1.  Writing allocates the
 * per-thread name buffer on first use, zero-fills it, then copies the
 * new name in, so the stored name is always NUL terminated.
 *
 * Returns ENOMEM if the caller's buffer is too small or allocation
 * fails, ENAMETOOLONG if the new name exceeds MAXTHREADNAMESIZE - 1,
 * or a copyin/copyout error.
 */
STATIC int
sysctl_handle_kern_threadname(	__unused struct sysctl_oid *oidp, __unused void *arg1,
	      __unused int arg2, struct sysctl_req *req)
{
	int error;
	struct uthread *ut = get_bsdthread_info(current_thread());
	user_addr_t oldp=0, newp=0;
	size_t *oldlenp=NULL;
	size_t newlen=0;

	oldp = req->oldptr;
	oldlenp = &(req->oldlen);
	newp = req->newptr;
	newlen = req->newlen;

	/* We want the current length, and maybe the string itself */
	if(oldlenp) {
		/* if we have no thread name yet tell'em we want MAXTHREADNAMESIZE - 1 */
		size_t currlen = MAXTHREADNAMESIZE - 1;

		if(ut->pth_name)
			/* use length of current thread name */
			currlen = strlen(ut->pth_name);
		if(oldp) {
			if(*oldlenp < currlen)
				return ENOMEM;
			/* NOTE - we do not copy the NULL terminator */
			if(ut->pth_name) {
				error = copyout(ut->pth_name,oldp,currlen);
				if(error)
					return error;
			}
		}
		/* return length of thread name minus NULL terminator (just like strlen)  */
		req->oldidx = currlen;
	}

	/* We want to set the name to something */
	if(newp)
	{
		if(newlen > (MAXTHREADNAMESIZE - 1))
			return ENAMETOOLONG;
		if(!ut->pth_name)
		{
			/* first name set on this thread: allocate the buffer */
			ut->pth_name = (char*)kalloc( MAXTHREADNAMESIZE );
			if(!ut->pth_name)
				return ENOMEM;
		}
		/* zero-fill so the copied-in name is always NUL terminated */
		bzero(ut->pth_name, MAXTHREADNAMESIZE);
		error = copyin(newp, ut->pth_name, newlen);
		if(error)
			return error;
	}

	return 0;
}
388
389SYSCTL_PROC(_kern, KERN_THREADNAME, threadname, CTLFLAG_ANYBODY | CTLTYPE_STRING | CTLFLAG_RW | CTLFLAG_LOCKED, 0, 0, sysctl_handle_kern_threadname,"A","");
390
391#define BSD_HOST 1
392STATIC int
393sysctl_sched_stats(__unused struct sysctl_oid *oidp, __unused void *arg1, __unused int arg2, struct sysctl_req *req)
394{
395	host_basic_info_data_t hinfo;
396	kern_return_t kret;
397	uint32_t size;
398	int changed;
399	mach_msg_type_number_t count = HOST_BASIC_INFO_COUNT;
400	struct _processor_statistics_np *buf;
401	int error;
402
403	kret = host_info((host_t)BSD_HOST, HOST_BASIC_INFO, (host_info_t)&hinfo, &count);
404	if (kret != KERN_SUCCESS) {
405		return EINVAL;
406	}
407
408	size = sizeof(struct _processor_statistics_np) * (hinfo.logical_cpu_max + 2); /* One for RT Queue, One for Fair Share Queue */
409
410	if (req->oldlen < size) {
411		return EINVAL;
412	}
413
414	MALLOC(buf, struct _processor_statistics_np*, size, M_TEMP, M_ZERO | M_WAITOK);
415
416	kret = get_sched_statistics(buf, &size);
417	if (kret != KERN_SUCCESS) {
418		error = EINVAL;
419		goto out;
420	}
421
422	error = sysctl_io_opaque(req, buf, size, &changed);
423	if (error) {
424		goto out;
425	}
426
427	if (changed) {
428		panic("Sched info changed?!");
429	}
430out:
431	FREE(buf, M_TEMP);
432	return error;
433}
434
435SYSCTL_PROC(_kern, OID_AUTO, sched_stats, CTLFLAG_LOCKED, 0, 0, sysctl_sched_stats, "-", "");
436
437STATIC int
438sysctl_sched_stats_enable(__unused struct sysctl_oid *oidp, __unused void *arg1, __unused int arg2, __unused struct sysctl_req *req)
439{
440	boolean_t active;
441	int res;
442
443	if (req->newlen != sizeof(active)) {
444		return EINVAL;
445	}
446
447	res = copyin(req->newptr, &active, sizeof(active));
448	if (res != 0) {
449		return res;
450	}
451
452	return set_sched_stats_active(active);
453}
454
455SYSCTL_PROC(_kern, OID_AUTO, sched_stats_enable, CTLFLAG_LOCKED | CTLFLAG_WR, 0, 0, sysctl_sched_stats_enable, "-", "");
456
457extern int get_kernel_symfile(proc_t, char **);
458
459#if COUNT_SYSCALLS
460#define KERN_COUNT_SYSCALLS (KERN_OSTYPE + 1000)
461
462extern int 	nsysent;
463extern int syscalls_log[];
464extern const char *syscallnames[];
465
/*
 * kern.count_calls: control per-syscall call counting (debug aid,
 * only built under COUNT_SYSCALLS).
 *
 * Accepts a single integer command via sysctl_int(); see the value
 * table in the comment below for the accepted commands.
 */
STATIC int
sysctl_docountsyscalls SYSCTL_HANDLER_ARGS
{
	__unused int cmd = oidp->oid_arg2;	/* subcommand*/
	__unused int *name = arg1;	/* oid element argument vector */
	__unused int namelen = arg2;	/* number of oid element arguments */
	user_addr_t oldp = req->oldptr;	/* user buffer copy out address */
	size_t *oldlenp = &req->oldlen;	/* user buffer copy out size */
	user_addr_t newp = req->newptr;	/* user buffer copy in address */
	size_t newlen = req->newlen;	/* user buffer copy in size */
	int error;

	int tmp;

	/* valid values passed in:
	 * = 0 means don't keep called counts for each bsd syscall
	 * > 0 means keep called counts for each bsd syscall
	 * = 2 means dump current counts to the system log
	 * = 3 means reset all counts
	 * for example, to dump current counts:
	 *		sysctl -w kern.count_calls=2
	 */
	error = sysctl_int(oldp, oldlenp, newp, newlen, &tmp);
	if ( error != 0 ) {
		return (error);
	}

	if ( tmp == 1 ) {
		do_count_syscalls = 1;
	}
	else if ( tmp == 0 || tmp == 2 || tmp == 3 ) {
		/* 2 dumps the counters; 0 and 3 reset them */
		int			i;
		for ( i = 0; i < nsysent; i++ ) {
			if ( syscalls_log[i] != 0 ) {
				if ( tmp == 2 ) {
					printf("%d calls - name %s \n", syscalls_log[i], syscallnames[i]);
				}
				else {
					syscalls_log[i] = 0;
				}
			}
		}
		/* NOTE(review): tmp == 0 resets the counters but never clears
		 * do_count_syscalls, despite the table above — confirm intended. */
		if ( tmp != 0 ) {
			do_count_syscalls = 1;
		}
	}

	/* adjust index so we return the right required/consumed amount */
	if (!error)
		req->oldidx += req->oldlen;

	return (error);
}
519SYSCTL_PROC(_kern, KERN_COUNT_SYSCALLS, count_syscalls, CTLTYPE_NODE|CTLFLAG_RD | CTLFLAG_LOCKED,
520	0,			/* Pointer argument (arg1) */
521	0,			/* Integer argument (arg2) */
522	sysctl_docountsyscalls,	/* Handler function */
523	NULL,			/* Data pointer */
524	"");
525#endif	/* COUNT_SYSCALLS */
526
527/*
528 * The following sysctl_* functions should not be used
529 * any more, as they can only cope with callers in
530 * user mode: Use new-style
531 *  sysctl_io_number()
532 *  sysctl_io_string()
533 *  sysctl_io_opaque()
534 * instead.
535 */
536
537/*
538 * Validate parameters and get old / set new parameters
539 * for an integer-valued sysctl function.
540 */
541int
542sysctl_int(user_addr_t oldp, size_t *oldlenp,
543           user_addr_t newp, size_t newlen, int *valp)
544{
545	int error = 0;
546
547	if (oldp != USER_ADDR_NULL && oldlenp == NULL)
548		return (EFAULT);
549	if (oldp && *oldlenp < sizeof(int))
550		return (ENOMEM);
551	if (newp && newlen != sizeof(int))
552		return (EINVAL);
553	*oldlenp = sizeof(int);
554	if (oldp)
555		error = copyout(valp, oldp, sizeof(int));
556	if (error == 0 && newp) {
557		error = copyin(newp, valp, sizeof(int));
558		AUDIT_ARG(value32, *valp);
559	}
560	return (error);
561}
562
563/*
564 * Validate parameters and get old / set new parameters
565 * for an quad(64bit)-valued sysctl function.
566 */
567int
568sysctl_quad(user_addr_t oldp, size_t *oldlenp,
569            user_addr_t newp, size_t newlen, quad_t *valp)
570{
571	int error = 0;
572
573	if (oldp != USER_ADDR_NULL && oldlenp == NULL)
574		return (EFAULT);
575	if (oldp && *oldlenp < sizeof(quad_t))
576		return (ENOMEM);
577	if (newp && newlen != sizeof(quad_t))
578		return (EINVAL);
579	*oldlenp = sizeof(quad_t);
580	if (oldp)
581		error = copyout(valp, oldp, sizeof(quad_t));
582	if (error == 0 && newp)
583		error = copyin(newp, valp, sizeof(quad_t));
584	return (error);
585}
586
587STATIC int
588sysdoproc_filt_KERN_PROC_PID(proc_t p, void * arg)
589{
590	if (p->p_pid != (pid_t)*(int*)arg)
591		return(0);
592	else
593		return(1);
594}
595
596STATIC int
597sysdoproc_filt_KERN_PROC_PGRP(proc_t p, void * arg)
598{
599	if (p->p_pgrpid != (pid_t)*(int*)arg)
600		return(0);
601	else
602	  return(1);
603}
604
605STATIC int
606sysdoproc_filt_KERN_PROC_TTY(proc_t p, void * arg)
607{
608	int retval;
609	struct tty *tp;
610
611	/* This is very racy but list lock is held.. Hmmm. */
612	if ((p->p_flag & P_CONTROLT) == 0 ||
613		(p->p_pgrp == NULL) || (p->p_pgrp->pg_session == NULL) ||
614			(tp = SESSION_TP(p->p_pgrp->pg_session)) == TTY_NULL ||
615			tp->t_dev != (dev_t)*(int*)arg)
616				retval = 0;
617	else
618		retval = 1;
619
620	return(retval);
621}
622
623STATIC int
624sysdoproc_filt_KERN_PROC_UID(proc_t p, void * arg)
625{
626	kauth_cred_t my_cred;
627	uid_t uid;
628
629	if (p->p_ucred == NULL)
630		return(0);
631	my_cred = kauth_cred_proc_ref(p);
632	uid = kauth_cred_getuid(my_cred);
633	kauth_cred_unref(&my_cred);
634
635	if (uid != (uid_t)*(int*)arg)
636		return(0);
637	else
638		return(1);
639}
640
641
642STATIC int
643sysdoproc_filt_KERN_PROC_RUID(proc_t p, void * arg)
644{
645	kauth_cred_t my_cred;
646	uid_t ruid;
647
648	if (p->p_ucred == NULL)
649		return(0);
650	my_cred = kauth_cred_proc_ref(p);
651	ruid = kauth_cred_getruid(my_cred);
652	kauth_cred_unref(&my_cred);
653
654	if (ruid != (uid_t)*(int*)arg)
655		return(0);
656	else
657		return(1);
658}
659
#if CONFIG_LCTX
/*
 * Filter for KERN_PROC_LCID: select processes whose login context id
 * equals the integer passed via arg.  Returns 1 on a match, 0 otherwise.
 */
STATIC int
sysdoproc_filt_KERN_PROC_LCID(proc_t p, void * arg)
{
	if (p->p_lctx == NULL)
		return(0);
	return (p->p_lctx->lc_id == (pid_t)*(int*)arg) ? 1 : 0;
}
#endif
671
672/*
673 * try over estimating by 5 procs
674 */
675#define KERN_PROCSLOP	(5 * sizeof (struct kinfo_proc))
/*
 * Argument block threaded through proc_iterate() into
 * sysdoproc_callback() for the deprecated KERN_PROC sysctls.
 */
struct sysdoproc_args {
	int	buflen;		/* remaining user buffer space, in bytes */
	void	*kprocp;	/* scratch kinfo_proc (32- or 64-bit layout) */
	boolean_t is_64_bit;	/* TRUE if the requesting process is 64-bit */
	user_addr_t	dp;	/* current copyout position in user buffer */
	size_t needed;		/* running total of bytes required */
	int sizeof_kproc;	/* size of one kinfo_proc record */
	int *errorp;		/* out: first copyout error encountered */
	int uidcheck;		/* nonzero: filter by effective uid */
	int ruidcheck;		/* nonzero: filter by real uid */
	int ttycheck;		/* nonzero: filter by controlling tty device */
	int uidval;		/* value used by the uid/ruid/tty filters */
};
689
/*
 * proc_iterate() callback for the KERN_PROC sysctls: apply any
 * uid/ruid/tty filter, fill a kinfo_proc record for the process in
 * the caller's layout, and copy it out to the user buffer, tracking
 * both space consumed and space required.
 */
int
sysdoproc_callback(proc_t p, void *arg)
{
	struct sysdoproc_args *args = arg;

	/* Only filter and copy out while the buffer can hold a full record. */
	if (args->buflen >= args->sizeof_kproc) {
		if ((args->ruidcheck != 0) && (sysdoproc_filt_KERN_PROC_RUID(p, &args->uidval) == 0))
			return (PROC_RETURNED);
		if ((args->uidcheck != 0) && (sysdoproc_filt_KERN_PROC_UID(p, &args->uidval) == 0))
			return (PROC_RETURNED);
		if ((args->ttycheck != 0) && (sysdoproc_filt_KERN_PROC_TTY(p, &args->uidval) == 0))
			return (PROC_RETURNED);

		bzero(args->kprocp, args->sizeof_kproc);
		if (args->is_64_bit)
			fill_user64_proc(p, args->kprocp);
		else
			fill_user32_proc(p, args->kprocp);
		int error = copyout(args->kprocp, args->dp, args->sizeof_kproc);
		if (error) {
			/* Report the error and stop iterating. */
			*args->errorp = error;
			return (PROC_RETURNED_DONE);
		}
		args->dp += args->sizeof_kproc;
		args->buflen -= args->sizeof_kproc;
	}
	/* NOTE(review): once the buffer is full the filters are skipped, so
	 * 'needed' may over-count for filtered queries — confirm intended. */
	args->needed += args->sizeof_kproc;
	return (PROC_RETURNED);
}
719
720SYSCTL_NODE(_kern, KERN_PROC, proc, CTLFLAG_RD | CTLFLAG_LOCKED, 0, "");
/*
 * Common handler for the kern.proc.* sysctls (the deprecated
 * KERN_PROC subcommands).  Selects a filter based on the subcommand
 * stored in oidp->oid_arg2, iterates the allproc and zombie lists,
 * and copies out one kinfo_proc record — in the 32- or 64-bit layout
 * matching the caller — per selected process.  When called with a
 * NULL output buffer, reports the space needed instead (padded by
 * KERN_PROCSLOP to tolerate process churn).
 */
STATIC int
sysctl_prochandle SYSCTL_HANDLER_ARGS
{
	int cmd = oidp->oid_arg2;	/* subcommand for multiple nodes */
	int *name = arg1;		/* oid element argument vector */
	int namelen = arg2;		/* number of oid element arguments */
	user_addr_t where = req->oldptr;/* user buffer copy out address */

	user_addr_t dp = where;
	size_t needed = 0;
	int buflen = where != USER_ADDR_NULL ? req->oldlen : 0;
	int error = 0;
	boolean_t is_64_bit = proc_is64bit(current_proc());
	struct user32_kinfo_proc  user32_kproc;
	struct user64_kinfo_proc  user_kproc;
	int sizeof_kproc;
	void *kprocp;
	int (*filterfn)(proc_t, void *) = 0;
	struct sysdoproc_args args;
	int uidcheck = 0;
	int ruidcheck = 0;
	int ttycheck = 0;

	/* Every subcommand except KERN_PROC_ALL takes exactly one argument. */
	if (namelen != 1 && !(namelen == 0 && cmd == KERN_PROC_ALL))
		return (EINVAL);

	/* Pick the kinfo_proc layout matching the caller's address width. */
	if (is_64_bit) {
		sizeof_kproc = sizeof(user_kproc);
		kprocp = &user_kproc;
	} else {
		sizeof_kproc = sizeof(user32_kproc);
		kprocp = &user32_kproc;
	}

	/* PID/PGRP/LCID filter inside proc_iterate(); the uid/ruid/tty
	 * checks are applied later in sysdoproc_callback() instead. */
	switch (cmd) {

		case KERN_PROC_PID:
			filterfn = sysdoproc_filt_KERN_PROC_PID;
			break;

		case KERN_PROC_PGRP:
			filterfn = sysdoproc_filt_KERN_PROC_PGRP;
			break;

		case KERN_PROC_TTY:
			ttycheck = 1;
			break;

		case KERN_PROC_UID:
			uidcheck = 1;
			break;

		case KERN_PROC_RUID:
			ruidcheck = 1;
			break;

#if CONFIG_LCTX
		case KERN_PROC_LCID:
			filterfn = sysdoproc_filt_KERN_PROC_LCID;
			break;
#endif
		case KERN_PROC_ALL:
			break;

		default:
			/* must be kern.proc.<unknown> */
			return (ENOTSUP);
	}

	error = 0;
	args.buflen = buflen;
	args.kprocp = kprocp;
	args.is_64_bit = is_64_bit;
	args.dp = dp;
	args.needed = needed;
	args.errorp = &error;
	args.uidcheck = uidcheck;
	args.ruidcheck = ruidcheck;
	args.ttycheck = ttycheck;
	args.sizeof_kproc = sizeof_kproc;
	if (namelen)
		args.uidval = name[0];

	proc_iterate((PROC_ALLPROCLIST | PROC_ZOMBPROCLIST),
	    sysdoproc_callback, &args, filterfn, name);

	if (error)
		return (error);

	dp = args.dp;
	needed = args.needed;

	if (where != USER_ADDR_NULL) {
		req->oldlen = dp - where;
		if (needed > req->oldlen)
			return (ENOMEM);
	} else {
		/* Size query: pad the estimate to tolerate process churn. */
		needed += KERN_PROCSLOP;
		req->oldlen = needed;
	}
	/* adjust index so we return the right required/consumed amount */
	req->oldidx += req->oldlen;
	return (0);
}
825
826/*
827 * We specify the subcommand code for multiple nodes as the 'req->arg2' value
828 * in the sysctl declaration itself, which comes into the handler function
829 * as 'oidp->oid_arg2'.
830 *
831 * For these particular sysctls, since they have well known OIDs, we could
832 * have just obtained it from the '((int *)arg1)[0]' parameter, but that would
833 * not demonstrate how to handle multiple sysctls that used OID_AUTO instead
834 * of a well known value with a common handler function.  This is desirable,
835 * because we want well known values to "go away" at some future date.
836 *
 * It should be noted that the value of '((int *)arg1)[1]' is used as
 * an integer parameter to the subcommand for many of these sysctls; we'd
 * rather have used '((int *)arg1)[0]' for that, or even better, an element
 * in a structure passed in as the 'newp' argument to sysctlbyname(3),
841 * and then use leaf-node permissions enforcement, but that would have
842 * necessitated modifying user space code to correspond to the interface
843 * change, and we are striving for binary backward compatibility here; even
844 * though these are SPI, and not intended for use by user space applications
845 * which are not themselves system tools or libraries, some applications
846 * have erroneously used them.
847 */
848SYSCTL_PROC(_kern_proc, KERN_PROC_ALL, all, CTLTYPE_NODE|CTLFLAG_RD | CTLFLAG_LOCKED,
849	0,			/* Pointer argument (arg1) */
850	KERN_PROC_ALL,		/* Integer argument (arg2) */
851	sysctl_prochandle,	/* Handler function */
852	NULL,			/* Data is size variant on ILP32/LP64 */
853	"");
854SYSCTL_PROC(_kern_proc, KERN_PROC_PID, pid, CTLTYPE_NODE|CTLFLAG_RD | CTLFLAG_LOCKED,
855	0,			/* Pointer argument (arg1) */
856	KERN_PROC_PID,		/* Integer argument (arg2) */
857	sysctl_prochandle,	/* Handler function */
858	NULL,			/* Data is size variant on ILP32/LP64 */
859	"");
860SYSCTL_PROC(_kern_proc, KERN_PROC_TTY, tty, CTLTYPE_NODE|CTLFLAG_RD | CTLFLAG_LOCKED,
861	0,			/* Pointer argument (arg1) */
862	KERN_PROC_TTY,		/* Integer argument (arg2) */
863	sysctl_prochandle,	/* Handler function */
864	NULL,			/* Data is size variant on ILP32/LP64 */
865	"");
866SYSCTL_PROC(_kern_proc, KERN_PROC_PGRP, pgrp, CTLTYPE_NODE|CTLFLAG_RD | CTLFLAG_LOCKED,
867	0,			/* Pointer argument (arg1) */
868	KERN_PROC_PGRP,		/* Integer argument (arg2) */
869	sysctl_prochandle,	/* Handler function */
870	NULL,			/* Data is size variant on ILP32/LP64 */
871	"");
872SYSCTL_PROC(_kern_proc, KERN_PROC_UID, uid, CTLTYPE_NODE|CTLFLAG_RD | CTLFLAG_LOCKED,
873	0,			/* Pointer argument (arg1) */
874	KERN_PROC_UID,		/* Integer argument (arg2) */
875	sysctl_prochandle,	/* Handler function */
876	NULL,			/* Data is size variant on ILP32/LP64 */
877	"");
878SYSCTL_PROC(_kern_proc, KERN_PROC_RUID, ruid, CTLTYPE_NODE|CTLFLAG_RD | CTLFLAG_LOCKED,
879	0,			/* Pointer argument (arg1) */
880	KERN_PROC_RUID,		/* Integer argument (arg2) */
881	sysctl_prochandle,	/* Handler function */
882	NULL,			/* Data is size variant on ILP32/LP64 */
883	"");
884SYSCTL_PROC(_kern_proc, KERN_PROC_LCID, lcid, CTLTYPE_NODE|CTLFLAG_RD | CTLFLAG_LOCKED,
885	0,			/* Pointer argument (arg1) */
886	KERN_PROC_LCID,		/* Integer argument (arg2) */
887	sysctl_prochandle,	/* Handler function */
888	NULL,			/* Data is size variant on ILP32/LP64 */
889	"");
890
891
892/*
893 * Fill in non-zero fields of an eproc structure for the specified process.
894 */
/*
 * Fill the non-zero fields of an ILP32 eproc: process group/session
 * info, parent pid, historical credential snapshots and controlling
 * terminal.  Caller must have bzeroed *ep.
 */
STATIC void
fill_user32_eproc(proc_t p, struct user32_eproc *__restrict ep)
{
	struct tty *tp;
	struct pgrp *pg;
	struct session *sessp;
	kauth_cred_t my_cred;

	/* Both references are dropped below via pg_rele()/session_rele(). */
	pg = proc_pgrp(p);
	sessp = proc_session(p);

	if (pg != PGRP_NULL) {
		ep->e_pgid = p->p_pgrpid;
		ep->e_jobc = pg->pg_jobc;
		if (sessp != SESSION_NULL && sessp->s_ttyvp)
			ep->e_flag = EPROC_CTTY;
	}
#if CONFIG_LCTX
	if (p->p_lctx)
		ep->e_lcid = p->p_lctx->lc_id;
#endif
	ep->e_ppid = p->p_ppid;
	if (p->p_ucred) {
		my_cred = kauth_cred_proc_ref(p);

		/* A fake historical pcred */
		ep->e_pcred.p_ruid = kauth_cred_getruid(my_cred);
		ep->e_pcred.p_svuid = kauth_cred_getsvuid(my_cred);
		ep->e_pcred.p_rgid = kauth_cred_getrgid(my_cred);
		ep->e_pcred.p_svgid = kauth_cred_getsvgid(my_cred);

		/* A fake historical *kauth_cred_t */
		ep->e_ucred.cr_ref = my_cred->cr_ref;
		ep->e_ucred.cr_uid = kauth_cred_getuid(my_cred);
		ep->e_ucred.cr_ngroups = posix_cred_get(my_cred)->cr_ngroups;
		bcopy(posix_cred_get(my_cred)->cr_groups,
			ep->e_ucred.cr_groups, NGROUPS * sizeof (gid_t));

		kauth_cred_unref(&my_cred);
	}

	/* Controlling terminal device, or NODEV when there is none. */
	if ((p->p_flag & P_CONTROLT) && (sessp != SESSION_NULL) &&
	     (tp = SESSION_TP(sessp))) {
		ep->e_tdev = tp->t_dev;
		ep->e_tpgid = sessp->s_ttypgrpid;
	} else
		ep->e_tdev = NODEV;

	if (sessp != SESSION_NULL) {
		if (SESS_LEADER(p, sessp))
			ep->e_flag |= EPROC_SLEADER;
		session_rele(sessp);
	}
	if (pg != PGRP_NULL)
		pg_rele(pg);
}
951
952/*
953 * Fill in non-zero fields of an LP64 eproc structure for the specified process.
954 */
955STATIC void
956fill_user64_eproc(proc_t p, struct user64_eproc *__restrict ep)
957{
958	struct tty *tp;
959	struct pgrp *pg;
960	struct session *sessp;
961	kauth_cred_t my_cred;
962
963	pg = proc_pgrp(p);
964	sessp = proc_session(p);
965
966	if (pg != PGRP_NULL) {
967		ep->e_pgid = p->p_pgrpid;
968		ep->e_jobc = pg->pg_jobc;
969		if (sessp != SESSION_NULL && sessp->s_ttyvp)
970			ep->e_flag = EPROC_CTTY;
971	}
972#if CONFIG_LCTX
973	if (p->p_lctx)
974		ep->e_lcid = p->p_lctx->lc_id;
975#endif
976	ep->e_ppid = p->p_ppid;
977	if (p->p_ucred) {
978		my_cred = kauth_cred_proc_ref(p);
979
980		/* A fake historical pcred */
981		ep->e_pcred.p_ruid = kauth_cred_getruid(my_cred);
982		ep->e_pcred.p_svuid = kauth_cred_getsvuid(my_cred);
983		ep->e_pcred.p_rgid = kauth_cred_getrgid(my_cred);
984		ep->e_pcred.p_svgid = kauth_cred_getsvgid(my_cred);
985
986		/* A fake historical *kauth_cred_t */
987		ep->e_ucred.cr_ref = my_cred->cr_ref;
988		ep->e_ucred.cr_uid = kauth_cred_getuid(my_cred);
989		ep->e_ucred.cr_ngroups = posix_cred_get(my_cred)->cr_ngroups;
990		bcopy(posix_cred_get(my_cred)->cr_groups,
991			ep->e_ucred.cr_groups, NGROUPS * sizeof (gid_t));
992
993		kauth_cred_unref(&my_cred);
994	}
995
996	if ((p->p_flag & P_CONTROLT) && (sessp != SESSION_NULL) &&
997	     (tp = SESSION_TP(sessp))) {
998		ep->e_tdev = tp->t_dev;
999		ep->e_tpgid = sessp->s_ttypgrpid;
1000	} else
1001		ep->e_tdev = NODEV;
1002
1003	if (sessp != SESSION_NULL) {
1004		if (SESS_LEADER(p, sessp))
1005			ep->e_flag |= EPROC_SLEADER;
1006		session_rele(sessp);
1007	}
1008	if (pg != PGRP_NULL)
1009		pg_rele(pg);
1010}
1011
1012/*
1013 * Fill in an eproc structure for the specified process.
1014 * bzeroed by our caller, so only set non-zero fields.
1015 */
/*
 * Fill a 32-bit extern_proc from the kernel proc structure.
 * Destination is bzeroed by the caller, so only non-zero fields are set.
 * Timer and start-time fields are explicitly cast down to 32-bit types.
 */
STATIC void
fill_user32_externproc(proc_t p, struct user32_extern_proc *__restrict exp)
{
	exp->p_starttime.tv_sec = p->p_start.tv_sec;
	exp->p_starttime.tv_usec = p->p_start.tv_usec;
	exp->p_flag = p->p_flag;
	/* Reconstruct historical p_flag bits from the modern p_lflag */
	if (p->p_lflag & P_LTRACED)
		exp->p_flag |= P_TRACED;
	if (p->p_lflag & P_LPPWAIT)
		exp->p_flag |= P_PPWAIT;
	if (p->p_lflag & P_LEXIT)
		exp->p_flag |= P_WEXIT;
	exp->p_stat = p->p_stat;
	exp->p_pid = p->p_pid;
	exp->p_oppid = p->p_oppid;
	/* Mach related  */
	exp->user_stack = p->user_stack;
	exp->p_debugger = p->p_debugger;
	exp->sigwait = p->sigwait;
	/* scheduling */
#ifdef _PROC_HAS_SCHEDINFO_
	/* scheduler stats only exist when the proc carries them */
	exp->p_estcpu = p->p_estcpu;
	exp->p_pctcpu = p->p_pctcpu;
	exp->p_slptime = p->p_slptime;
#endif
	exp->p_realtimer.it_interval.tv_sec =
		(user32_time_t)p->p_realtimer.it_interval.tv_sec;
	exp->p_realtimer.it_interval.tv_usec =
		(__int32_t)p->p_realtimer.it_interval.tv_usec;

	exp->p_realtimer.it_value.tv_sec =
		(user32_time_t)p->p_realtimer.it_value.tv_sec;
	exp->p_realtimer.it_value.tv_usec =
		(__int32_t)p->p_realtimer.it_value.tv_usec;

	exp->p_rtime.tv_sec = (user32_time_t)p->p_rtime.tv_sec;
	exp->p_rtime.tv_usec = (__int32_t)p->p_rtime.tv_usec;

	exp->p_sigignore = p->p_sigignore;
	exp->p_sigcatch = p->p_sigcatch;
	exp->p_priority = p->p_priority;
	exp->p_nice = p->p_nice;
	bcopy(&p->p_comm, &exp->p_comm, MAXCOMLEN);
	exp->p_xstat = p->p_xstat;
	exp->p_acflag = p->p_acflag;
}
1062
1063/*
1064 * Fill in an LP64 version of extern_proc structure for the specified process.
1065 */
/*
 * Fill an LP64 extern_proc from the kernel proc structure.
 * Destination is bzeroed by the caller, so only non-zero fields are set.
 * Unlike the 32-bit variant, timer fields are copied without narrowing casts.
 */
STATIC void
fill_user64_externproc(proc_t p, struct user64_extern_proc *__restrict exp)
{
	exp->p_starttime.tv_sec = p->p_start.tv_sec;
	exp->p_starttime.tv_usec = p->p_start.tv_usec;
	exp->p_flag = p->p_flag;
	/* Reconstruct historical p_flag bits from the modern p_lflag */
	if (p->p_lflag & P_LTRACED)
		exp->p_flag |= P_TRACED;
	if (p->p_lflag & P_LPPWAIT)
		exp->p_flag |= P_PPWAIT;
	if (p->p_lflag & P_LEXIT)
		exp->p_flag |= P_WEXIT;
	exp->p_stat = p->p_stat;
	exp->p_pid = p->p_pid;
	exp->p_oppid = p->p_oppid;
	/* Mach related  */
	exp->user_stack = p->user_stack;
	exp->p_debugger = p->p_debugger;
	exp->sigwait = p->sigwait;
	/* scheduling */
#ifdef _PROC_HAS_SCHEDINFO_
	/* scheduler stats only exist when the proc carries them */
	exp->p_estcpu = p->p_estcpu;
	exp->p_pctcpu = p->p_pctcpu;
	exp->p_slptime = p->p_slptime;
#endif
	exp->p_realtimer.it_interval.tv_sec = p->p_realtimer.it_interval.tv_sec;
	exp->p_realtimer.it_interval.tv_usec = p->p_realtimer.it_interval.tv_usec;

	exp->p_realtimer.it_value.tv_sec = p->p_realtimer.it_value.tv_sec;
	exp->p_realtimer.it_value.tv_usec = p->p_realtimer.it_value.tv_usec;

	exp->p_rtime.tv_sec = p->p_rtime.tv_sec;
	exp->p_rtime.tv_usec = p->p_rtime.tv_usec;

	exp->p_sigignore = p->p_sigignore;
	exp->p_sigcatch = p->p_sigcatch;
	exp->p_priority = p->p_priority;
	exp->p_nice = p->p_nice;
	bcopy(&p->p_comm, &exp->p_comm, MAXCOMLEN);
	exp->p_xstat = p->p_xstat;
	exp->p_acflag = p->p_acflag;
}
1108
/*
 * Assemble a complete 32-bit kinfo_proc (extern_proc + eproc) for p.
 * Destination is assumed zeroed by the caller.
 */
STATIC void
fill_user32_proc(proc_t p, struct user32_kinfo_proc *__restrict kp)
{
	/* on a 64 bit kernel, 32 bit users get some truncated information */
	fill_user32_externproc(p, &kp->kp_proc);
	fill_user32_eproc(p, &kp->kp_eproc);
}
1116
/*
 * Assemble a complete LP64 kinfo_proc (extern_proc + eproc) for p.
 * Destination is assumed zeroed by the caller.
 */
STATIC void
fill_user64_proc(proc_t p, struct user64_kinfo_proc *__restrict kp)
{
	fill_user64_externproc(p, &kp->kp_proc);
	fill_user64_eproc(p, &kp->kp_eproc);
}
1123
1124STATIC int
1125sysctl_kdebug_ops SYSCTL_HANDLER_ARGS
1126{
1127	__unused int cmd = oidp->oid_arg2;	/* subcommand*/
1128	int *name = arg1;		/* oid element argument vector */
1129	int namelen = arg2;		/* number of oid element arguments */
1130	user_addr_t oldp = req->oldptr;	/* user buffer copy out address */
1131	size_t *oldlenp = &req->oldlen;	/* user buffer copy out size */
1132//	user_addr_t newp = req->newptr;	/* user buffer copy in address */
1133//	size_t newlen = req->newlen;	/* user buffer copy in size */
1134
1135	proc_t p = current_proc();
1136	int ret=0;
1137
1138	if (namelen == 0)
1139		return(ENOTSUP);
1140
1141	ret = suser(kauth_cred_get(), &p->p_acflag);
1142#if KPERF
1143	/* Non-root processes may be blessed by kperf to access data
1144	 * logged into trace.
1145	 */
1146	if (ret)
1147		ret = kperf_access_check();
1148#endif /* KPERF */
1149	if (ret)
1150		return(ret);
1151
1152	switch(name[0]) {
1153	case KERN_KDEFLAGS:
1154	case KERN_KDDFLAGS:
1155	case KERN_KDENABLE:
1156	case KERN_KDGETBUF:
1157	case KERN_KDSETUP:
1158	case KERN_KDREMOVE:
1159	case KERN_KDSETREG:
1160	case KERN_KDGETREG:
1161	case KERN_KDREADTR:
1162        case KERN_KDWRITETR:
1163        case KERN_KDWRITEMAP:
1164	case KERN_KDPIDTR:
1165	case KERN_KDTHRMAP:
1166	case KERN_KDPIDEX:
1167	case KERN_KDSETRTCDEC:
1168	case KERN_KDSETBUF:
1169	case KERN_KDGETENTROPY:
1170	case KERN_KDENABLE_BG_TRACE:
1171	case KERN_KDDISABLE_BG_TRACE:
1172	case KERN_KDREADCURTHRMAP:
1173	case KERN_KDSET_TYPEFILTER:
1174        case KERN_KDBUFWAIT:
1175	case KERN_KDCPUMAP:
1176
1177	        ret = kdbg_control(name, namelen, oldp, oldlenp);
1178	        break;
1179	default:
1180		ret= ENOTSUP;
1181		break;
1182	}
1183
1184	/* adjust index so we return the right required/consumed amount */
1185	if (!ret)
1186		req->oldidx += req->oldlen;
1187
1188	return (ret);
1189}
/* kern.kdebug -- node-style OID dispatched through sysctl_kdebug_ops */
SYSCTL_PROC(_kern, KERN_KDEBUG, kdebug, CTLTYPE_NODE|CTLFLAG_RD | CTLFLAG_LOCKED,
	0,			/* Pointer argument (arg1) */
	0,			/* Integer argument (arg2) */
	sysctl_kdebug_ops,	/* Handler function */
	NULL,			/* Data pointer */
	"");
1196
1197
1198/*
1199 * Return the top *sizep bytes of the user stack, or the entire area of the
1200 * user stack down through the saved exec_path, whichever is smaller.
1201 */
1202STATIC int
1203sysctl_doprocargs SYSCTL_HANDLER_ARGS
1204{
1205	__unused int cmd = oidp->oid_arg2;	/* subcommand*/
1206	int *name = arg1;		/* oid element argument vector */
1207	int namelen = arg2;		/* number of oid element arguments */
1208	user_addr_t oldp = req->oldptr;	/* user buffer copy out address */
1209	size_t *oldlenp = &req->oldlen;	/* user buffer copy out size */
1210//	user_addr_t newp = req->newptr;	/* user buffer copy in address */
1211//	size_t newlen = req->newlen;	/* user buffer copy in size */
1212	int error;
1213
1214	error =  sysctl_procargsx( name, namelen, oldp, oldlenp, current_proc(), 0);
1215
1216	/* adjust index so we return the right required/consumed amount */
1217	if (!error)
1218		req->oldidx += req->oldlen;
1219
1220	return (error);
1221}
/* kern.procargs -- legacy argument-area export (see sysctl_doprocargs) */
SYSCTL_PROC(_kern, KERN_PROCARGS, procargs, CTLTYPE_NODE|CTLFLAG_RD | CTLFLAG_LOCKED,
	0,			/* Pointer argument (arg1) */
	0,			/* Integer argument (arg2) */
	sysctl_doprocargs,	/* Handler function */
	NULL,			/* Data pointer */
	"");
1228
1229STATIC int
1230sysctl_doprocargs2 SYSCTL_HANDLER_ARGS
1231{
1232	__unused int cmd = oidp->oid_arg2;	/* subcommand*/
1233	int *name = arg1;		/* oid element argument vector */
1234	int namelen = arg2;		/* number of oid element arguments */
1235	user_addr_t oldp = req->oldptr;	/* user buffer copy out address */
1236	size_t *oldlenp = &req->oldlen;	/* user buffer copy out size */
1237//	user_addr_t newp = req->newptr;	/* user buffer copy in address */
1238//	size_t newlen = req->newlen;	/* user buffer copy in size */
1239	int error;
1240
1241	error = sysctl_procargsx( name, namelen, oldp, oldlenp, current_proc(), 1);
1242
1243	/* adjust index so we return the right required/consumed amount */
1244	if (!error)
1245		req->oldidx += req->oldlen;
1246
1247	return (error);
1248}
/* kern.procargs2 -- argument-area export with leading argc (see sysctl_doprocargs2) */
SYSCTL_PROC(_kern, KERN_PROCARGS2, procargs2, CTLTYPE_NODE|CTLFLAG_RD | CTLFLAG_LOCKED,
	0,			/* Pointer argument (arg1) */
	0,			/* Integer argument (arg2) */
	sysctl_doprocargs2,	/* Handler function */
	NULL,			/* Data pointer */
	"");
1255
1256STATIC int
1257sysctl_procargsx(int *name, u_int namelen, user_addr_t where,
1258                 size_t *sizep, proc_t cur_proc, int argc_yes)
1259{
1260	proc_t p;
1261	int buflen = where != USER_ADDR_NULL ? *sizep : 0;
1262	int error = 0;
1263	struct _vm_map *proc_map;
1264	struct task * task;
1265	vm_map_copy_t	tmp;
1266	user_addr_t	arg_addr;
1267	size_t		arg_size;
1268	caddr_t data;
1269	size_t argslen=0;
1270	int size;
1271	vm_offset_t	copy_start, copy_end;
1272	kern_return_t ret;
1273	int pid;
1274	kauth_cred_t my_cred;
1275	uid_t uid;
1276
1277	if ( namelen < 1 )
1278		return(EINVAL);
1279
1280	if (argc_yes)
1281		buflen -= sizeof(int);		/* reserve first word to return argc */
1282
1283	/* we only care about buflen when where (oldp from sysctl) is not NULL. */
1284	/* when where (oldp from sysctl) is NULL and sizep (oldlenp from sysctl */
1285	/* is not NULL then the caller wants us to return the length needed to */
1286	/* hold the data we would return */
1287	if (where != USER_ADDR_NULL && (buflen <= 0 || buflen > ARG_MAX)) {
1288		return(EINVAL);
1289	}
1290	arg_size = buflen;
1291
1292	/*
1293	 *	Lookup process by pid
1294	 */
1295	pid = name[0];
1296	p = proc_find(pid);
1297	if (p == NULL) {
1298		return(EINVAL);
1299	}
1300
1301	/*
1302	 *	Copy the top N bytes of the stack.
1303	 *	On all machines we have so far, the stack grows
1304	 *	downwards.
1305	 *
1306	 *	If the user expects no more than N bytes of
1307	 *	argument list, use that as a guess for the
1308	 *	size.
1309	 */
1310
1311	if (!p->user_stack) {
1312		proc_rele(p);
1313		return(EINVAL);
1314	}
1315
1316	if (where == USER_ADDR_NULL) {
1317		/* caller only wants to know length of proc args data */
1318		if (sizep == NULL) {
1319			proc_rele(p);
1320			return(EFAULT);
1321		}
1322
1323		 size = p->p_argslen;
1324		proc_rele(p);
1325		 if (argc_yes) {
1326		 	size += sizeof(int);
1327		 }
1328		 else {
1329			/*
1330			 * old PROCARGS will return the executable's path and plus some
1331			 * extra space for work alignment and data tags
1332			 */
1333		 	size += PATH_MAX + (6 * sizeof(int));
1334		 }
1335		size += (size & (sizeof(int) - 1)) ? (sizeof(int) - (size & (sizeof(int) - 1))) : 0;
1336		*sizep = size;
1337		return (0);
1338	}
1339
1340	my_cred = kauth_cred_proc_ref(p);
1341	uid = kauth_cred_getuid(my_cred);
1342	kauth_cred_unref(&my_cred);
1343
1344	if ((uid != kauth_cred_getuid(kauth_cred_get()))
1345		&& suser(kauth_cred_get(), &cur_proc->p_acflag)) {
1346		proc_rele(p);
1347		return (EINVAL);
1348	}
1349
1350	if ((u_int)arg_size > p->p_argslen)
1351	        arg_size = round_page(p->p_argslen);
1352
1353	arg_addr = p->user_stack - arg_size;
1354
1355
1356	/*
1357	 *	Before we can block (any VM code), make another
1358	 *	reference to the map to keep it alive.  We do
1359	 *	that by getting a reference on the task itself.
1360	 */
1361	task = p->task;
1362	if (task == NULL) {
1363		proc_rele(p);
1364		return(EINVAL);
1365	}
1366
1367	argslen = p->p_argslen;
1368	/*
1369	 * Once we have a task reference we can convert that into a
1370	 * map reference, which we will use in the calls below.  The
1371	 * task/process may change its map after we take this reference
1372	 * (see execve), but the worst that will happen then is a return
1373	 * of stale info (which is always a possibility).
1374	 */
1375	task_reference(task);
1376	proc_rele(p);
1377	proc_map = get_task_map_reference(task);
1378	task_deallocate(task);
1379
1380	if (proc_map == NULL)
1381		return(EINVAL);
1382
1383
1384	ret = kmem_alloc(kernel_map, &copy_start, round_page(arg_size));
1385	if (ret != KERN_SUCCESS) {
1386		vm_map_deallocate(proc_map);
1387		return(ENOMEM);
1388	}
1389
1390	copy_end = round_page(copy_start + arg_size);
1391
1392	if( vm_map_copyin(proc_map, (vm_map_address_t)arg_addr,
1393			  (vm_map_size_t)arg_size, FALSE, &tmp) != KERN_SUCCESS) {
1394			vm_map_deallocate(proc_map);
1395			kmem_free(kernel_map, copy_start,
1396					round_page(arg_size));
1397			return (EIO);
1398	}
1399
1400	/*
1401	 *	Now that we've done the copyin from the process'
1402	 *	map, we can release the reference to it.
1403	 */
1404	vm_map_deallocate(proc_map);
1405
1406	if( vm_map_copy_overwrite(kernel_map,
1407				  (vm_map_address_t)copy_start,
1408				  tmp, FALSE) != KERN_SUCCESS) {
1409			kmem_free(kernel_map, copy_start,
1410					round_page(arg_size));
1411			return (EIO);
1412	}
1413
1414	if (arg_size > argslen) {
1415		data = (caddr_t) (copy_end - argslen);
1416		size = argslen;
1417	} else {
1418		data = (caddr_t) (copy_end - arg_size);
1419		size = arg_size;
1420	}
1421
1422	if (argc_yes) {
1423		/* Put processes argc as the first word in the copyout buffer */
1424		suword(where, p->p_argc);
1425		error = copyout(data, (where + sizeof(int)), size);
1426		size += sizeof(int);
1427	} else {
1428		error = copyout(data, where, size);
1429
1430		/*
1431		 * Make the old PROCARGS work to return the executable's path
1432		 * But, only if there is enough space in the provided buffer
1433		 *
1434		 * on entry: data [possibily] points to the beginning of the path
1435		 *
1436		 * Note: we keep all pointers&sizes aligned to word boundries
1437		 */
1438		if ( (! error) && (buflen > 0 && (u_int)buflen > argslen) )
1439		{
1440			int binPath_sz, alignedBinPath_sz = 0;
1441			int extraSpaceNeeded, addThis;
1442			user_addr_t placeHere;
1443			char * str = (char *) data;
1444			int max_len = size;
1445
1446			/* Some apps are really bad about messing up their stacks
1447			   So, we have to be extra careful about getting the length
1448			   of the executing binary.  If we encounter an error, we bail.
1449			*/
1450
1451			/* Limit ourselves to PATH_MAX paths */
1452			if ( max_len > PATH_MAX ) max_len = PATH_MAX;
1453
1454			binPath_sz = 0;
1455
1456			while ( (binPath_sz < max_len-1) && (*str++ != 0) )
1457				binPath_sz++;
1458
1459			/* If we have a NUL terminator, copy it, too */
1460			if (binPath_sz < max_len-1) binPath_sz += 1;
1461
1462			/* Pre-Flight the space requiremnts */
1463
1464			/* Account for the padding that fills out binPath to the next word */
1465			alignedBinPath_sz += (binPath_sz & (sizeof(int)-1)) ? (sizeof(int)-(binPath_sz & (sizeof(int)-1))) : 0;
1466
1467			placeHere = where + size;
1468
1469			/* Account for the bytes needed to keep placeHere word aligned */
1470			addThis = (placeHere & (sizeof(int)-1)) ? (sizeof(int)-(placeHere & (sizeof(int)-1))) : 0;
1471
1472			/* Add up all the space that is needed */
1473			extraSpaceNeeded = alignedBinPath_sz + addThis + binPath_sz + (4 * sizeof(int));
1474
1475			/* is there is room to tack on argv[0]? */
1476			if ( (buflen & ~(sizeof(int)-1)) >= ( argslen + extraSpaceNeeded ))
1477			{
1478				placeHere += addThis;
1479				suword(placeHere, 0);
1480				placeHere += sizeof(int);
1481				suword(placeHere, 0xBFFF0000);
1482				placeHere += sizeof(int);
1483				suword(placeHere, 0);
1484				placeHere += sizeof(int);
1485				error = copyout(data, placeHere, binPath_sz);
1486				if ( ! error )
1487				{
1488					placeHere += binPath_sz;
1489					suword(placeHere, 0);
1490					size += extraSpaceNeeded;
1491				}
1492			}
1493		}
1494	}
1495
1496	if (copy_start != (vm_offset_t) 0) {
1497		kmem_free(kernel_map, copy_start, copy_end - copy_start);
1498	}
1499	if (error) {
1500		return(error);
1501	}
1502
1503	if (where != USER_ADDR_NULL)
1504		*sizep = size;
1505	return (0);
1506}
1507
1508
1509/*
1510 * Max number of concurrent aio requests
1511 */
1512STATIC int
1513sysctl_aiomax
1514(__unused struct sysctl_oid *oidp, __unused void *arg1, __unused int arg2, struct sysctl_req *req)
1515{
1516	int new_value, changed;
1517	int error = sysctl_io_number(req, aio_max_requests, sizeof(int), &new_value, &changed);
1518	if (changed) {
1519		 /* make sure the system-wide limit is greater than the per process limit */
1520		if (new_value >= aio_max_requests_per_process && new_value <= AIO_MAX_REQUESTS)
1521			aio_max_requests = new_value;
1522		else
1523			error = EINVAL;
1524	}
1525	return(error);
1526}
1527
1528
1529/*
1530 * Max number of concurrent aio requests per process
1531 */
1532STATIC int
1533sysctl_aioprocmax
1534(__unused struct sysctl_oid *oidp, __unused void *arg1, __unused int arg2, struct sysctl_req *req)
1535{
1536	int new_value, changed;
1537	int error = sysctl_io_number(req, aio_max_requests_per_process, sizeof(int), &new_value, &changed);
1538	if (changed) {
1539		/* make sure per process limit is less than the system-wide limit */
1540		if (new_value <= aio_max_requests && new_value >= AIO_LISTIO_MAX)
1541			aio_max_requests_per_process = new_value;
1542		else
1543			error = EINVAL;
1544	}
1545	return(error);
1546}
1547
1548
1549/*
1550 * Max number of async IO worker threads
1551 */
1552STATIC int
1553sysctl_aiothreads
1554(__unused struct sysctl_oid *oidp, __unused void *arg1, __unused int arg2, struct sysctl_req *req)
1555{
1556	int new_value, changed;
1557	int error = sysctl_io_number(req, aio_worker_threads, sizeof(int), &new_value, &changed);
1558	if (changed) {
1559		/* we only allow an increase in the number of worker threads */
1560	        if (new_value > aio_worker_threads ) {
1561		        _aio_create_worker_threads((new_value - aio_worker_threads));
1562			aio_worker_threads = new_value;
1563		}
1564		else
1565		        error = EINVAL;
1566	}
1567	return(error);
1568}
1569
1570
1571/*
1572 * System-wide limit on the max number of processes
1573 */
1574STATIC int
1575sysctl_maxproc
1576(__unused struct sysctl_oid *oidp, __unused void *arg1, __unused int arg2, struct sysctl_req *req)
1577{
1578	int new_value, changed;
1579	int error = sysctl_io_number(req, maxproc, sizeof(int), &new_value, &changed);
1580	if (changed) {
1581		AUDIT_ARG(value32, new_value);
1582		/* make sure the system-wide limit is less than the configured hard
1583		   limit set at kernel compilation */
1584		if (new_value <= hard_maxproc && new_value > 0)
1585			maxproc = new_value;
1586		else
1587			error = EINVAL;
1588	}
1589	return(error);
1590}
1591
/* Read-only kernel identity strings and revision numbers. */
SYSCTL_STRING(_kern, KERN_OSTYPE, ostype,
		CTLFLAG_RD | CTLFLAG_KERN | CTLFLAG_LOCKED,
		ostype, 0, "");
SYSCTL_STRING(_kern, KERN_OSRELEASE, osrelease,
		CTLFLAG_RD | CTLFLAG_KERN | CTLFLAG_LOCKED,
		osrelease, 0, "");
SYSCTL_INT(_kern, KERN_OSREV, osrevision,
		CTLFLAG_RD | CTLFLAG_KERN | CTLFLAG_LOCKED,
		(int *)NULL, BSD, "");
SYSCTL_STRING(_kern, KERN_VERSION, version,
		CTLFLAG_RD | CTLFLAG_KERN | CTLFLAG_LOCKED,
		version, 0, "");
SYSCTL_STRING(_kern, OID_AUTO, uuid,
		CTLFLAG_RD | CTLFLAG_KERN | CTLFLAG_LOCKED,
		&kernel_uuid_string[0], 0, "");
1607
#if DEBUG
/* State controlling kprintf() syscall tracing (DEBUG kernels only). */
int debug_kprint_syscall = 0;
char debug_kprint_syscall_process[MAXCOMLEN+1];

/* Thread safe: bits and string value are not used to reclaim state */
SYSCTL_INT (_debug, OID_AUTO, kprint_syscall,
	    CTLFLAG_RW | CTLFLAG_LOCKED, &debug_kprint_syscall, 0, "kprintf syscall tracing");
SYSCTL_STRING(_debug, OID_AUTO, kprint_syscall_process,
			  CTLFLAG_RW | CTLFLAG_LOCKED, debug_kprint_syscall_process, sizeof(debug_kprint_syscall_process),
			  "name of process for kprintf syscall tracing");
1618
1619int debug_kprint_current_process(const char **namep)
1620{
1621	struct proc *p = current_proc();
1622
1623	if (p == NULL) {
1624		return 0;
1625	}
1626
1627	if (debug_kprint_syscall_process[0]) {
1628		/* user asked to scope tracing to a particular process name */
1629		if(0 == strncmp(debug_kprint_syscall_process,
1630						p->p_comm, sizeof(debug_kprint_syscall_process))) {
1631			/* no value in telling the user that we traced what they asked */
1632			if(namep) *namep = NULL;
1633
1634			return 1;
1635		} else {
1636			return 0;
1637		}
1638	}
1639
1640	/* trace all processes. Tell user what we traced */
1641	if (namep) {
1642		*namep = p->p_comm;
1643	}
1644
1645	return 1;
1646}
1647#endif
1648
1649/* PR-5293665: need to use a callback function for kern.osversion to set
1650 * osversion in IORegistry */
1651
1652STATIC int
1653sysctl_osversion(__unused struct sysctl_oid *oidp, void *arg1, int arg2, struct sysctl_req *req)
1654{
1655    int rval = 0;
1656
1657    rval = sysctl_handle_string(oidp, arg1, arg2, req);
1658
1659    if (req->newptr) {
1660        IORegistrySetOSBuildVersion((char *)arg1);
1661    }
1662
1663    return rval;
1664}
1665
/* kern.osversion -- string OID with a handler so writes reach IORegistry */
SYSCTL_PROC(_kern, KERN_OSVERSION, osversion,
        CTLFLAG_RW | CTLFLAG_KERN | CTLTYPE_STRING | CTLFLAG_LOCKED,
        osversion, 256 /* OSVERSIZE*/,
        sysctl_osversion, "A", "");
1670
1671STATIC int
1672sysctl_sysctl_bootargs
1673(__unused struct sysctl_oid *oidp, __unused void *arg1, __unused int arg2, struct sysctl_req *req)
1674{
1675	int error;
1676	char buf[256];
1677
1678	strlcpy(buf, PE_boot_args(), 256);
1679	error = sysctl_io_string(req, buf, 256, 0, NULL);
1680	return(error);
1681}
1682
/* kern.bootargs -- read-only boot-args string */
SYSCTL_PROC(_kern, OID_AUTO, bootargs,
	CTLFLAG_LOCKED | CTLFLAG_RD | CTLFLAG_KERN | CTLTYPE_STRING,
	NULL, 0,
	sysctl_sysctl_bootargs, "A", "bootargs");
1687
/* Kernel limits and object counters exported under kern.* */
SYSCTL_INT(_kern, KERN_MAXFILES, maxfiles,
		CTLFLAG_RW | CTLFLAG_KERN | CTLFLAG_LOCKED,
		&maxfiles, 0, "");
SYSCTL_INT(_kern, KERN_ARGMAX, argmax,
		CTLFLAG_RD | CTLFLAG_KERN | CTLFLAG_LOCKED,
		(int *)NULL, ARG_MAX, "");
SYSCTL_INT(_kern, KERN_POSIX1, posix1version,
		CTLFLAG_RD | CTLFLAG_KERN | CTLFLAG_LOCKED,
		(int *)NULL, _POSIX_VERSION, "");
SYSCTL_INT(_kern, KERN_NGROUPS, ngroups,
		CTLFLAG_RD | CTLFLAG_KERN | CTLFLAG_LOCKED,
		(int *)NULL, NGROUPS_MAX, "");
SYSCTL_INT(_kern, KERN_JOB_CONTROL, job_control,
		CTLFLAG_RD | CTLFLAG_KERN | CTLFLAG_LOCKED,
		(int *)NULL, 1, "");
#if 1	/* _POSIX_SAVED_IDS from <unistd.h> */
SYSCTL_INT(_kern, KERN_SAVED_IDS, saved_ids,
		CTLFLAG_RD | CTLFLAG_KERN | CTLFLAG_LOCKED,
		(int *)NULL, 1, "");
#else
SYSCTL_INT(_kern, KERN_SAVED_IDS, saved_ids,
		CTLFLAG_RD | CTLFLAG_KERN | CTLFLAG_LOCKED,
		NULL, 0, "");
#endif
SYSCTL_INT(_kern, OID_AUTO, num_files,
		CTLFLAG_RD | CTLFLAG_LOCKED,
		&nfiles, 0, "");
SYSCTL_COMPAT_INT(_kern, OID_AUTO, num_vnodes,
		CTLFLAG_RD | CTLFLAG_LOCKED,
		&numvnodes, 0, "");
SYSCTL_INT(_kern, OID_AUTO, num_tasks,
		CTLFLAG_RD | CTLFLAG_LOCKED,
		&task_max, 0, "");
SYSCTL_INT(_kern, OID_AUTO, num_threads,
		CTLFLAG_RD | CTLFLAG_LOCKED,
		&thread_max, 0, "");
SYSCTL_INT(_kern, OID_AUTO, num_taskthreads,
		CTLFLAG_RD | CTLFLAG_LOCKED,
		&task_threadmax, 0, "");
1727
1728STATIC int
1729sysctl_maxvnodes (__unused struct sysctl_oid *oidp, __unused void *arg1, __unused int arg2, struct sysctl_req *req)
1730{
1731	int oldval = desiredvnodes;
1732	int error = sysctl_io_number(req, desiredvnodes, sizeof(int), &desiredvnodes, NULL);
1733
1734	if (oldval != desiredvnodes) {
1735		reset_vmobjectcache(oldval, desiredvnodes);
1736		resize_namecache(desiredvnodes);
1737	}
1738
1739	return(error);
1740}
1741
/* Tunables wired to the handler functions defined above. */
SYSCTL_INT(_kern, OID_AUTO, namecache_disabled,
		CTLFLAG_RW | CTLFLAG_LOCKED,
		&nc_disabled, 0, "");

SYSCTL_PROC(_kern, KERN_MAXVNODES, maxvnodes,
		CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_LOCKED,
		0, 0, sysctl_maxvnodes, "I", "");

SYSCTL_PROC(_kern, KERN_MAXPROC, maxproc,
		CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_LOCKED,
		0, 0, sysctl_maxproc, "I", "");

SYSCTL_PROC(_kern, KERN_AIOMAX, aiomax,
		CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_LOCKED,
		0, 0, sysctl_aiomax, "I", "");

SYSCTL_PROC(_kern, KERN_AIOPROCMAX, aioprocmax,
		CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_LOCKED,
		0, 0, sysctl_aioprocmax, "I", "");

SYSCTL_PROC(_kern, KERN_AIOTHREADS, aiothreads,
		CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_LOCKED,
		0, 0, sysctl_aiothreads, "I", "");

#if (DEVELOPMENT || DEBUG)
/* scheduler SMT balancing knob; development/debug kernels only */
extern int sched_smt_balance;
SYSCTL_INT(_kern, OID_AUTO, sched_smt_balance,
               CTLFLAG_KERN| CTLFLAG_RW| CTLFLAG_LOCKED,
               &sched_smt_balance, 0, "");
#endif
1772
1773STATIC int
1774sysctl_securelvl
1775(__unused struct sysctl_oid *oidp, __unused void *arg1, __unused int arg2, struct sysctl_req *req)
1776{
1777	int new_value, changed;
1778	int error = sysctl_io_number(req, securelevel, sizeof(int), &new_value, &changed);
1779	if (changed) {
1780		if (!(new_value < securelevel && req->p->p_pid != 1)) {
1781			proc_list_lock();
1782			securelevel = new_value;
1783			proc_list_unlock();
1784		} else {
1785			error = EPERM;
1786		}
1787	}
1788	return(error);
1789}
1790
/* kern.securelevel -- guarded by sysctl_securelvl */
SYSCTL_PROC(_kern, KERN_SECURELVL, securelevel,
		CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_LOCKED,
		0, 0, sysctl_securelvl, "I", "");
1794
1795
1796STATIC int
1797sysctl_domainname
1798(__unused struct sysctl_oid *oidp, __unused void *arg1, __unused int arg2, struct sysctl_req *req)
1799{
1800	int error, changed;
1801	error = sysctl_io_string(req, domainname, sizeof(domainname), 0, &changed);
1802	if (changed) {
1803		domainnamelen = strlen(domainname);
1804	}
1805	return(error);
1806}
1807
/* NIS domain name and the legacy hostid value. */
SYSCTL_PROC(_kern, KERN_DOMAINNAME, nisdomainname,
		CTLTYPE_STRING | CTLFLAG_RW | CTLFLAG_LOCKED,
		0, 0, sysctl_domainname, "A", "");

SYSCTL_COMPAT_INT(_kern, KERN_HOSTID, hostid,
		CTLFLAG_RW | CTLFLAG_KERN | CTLFLAG_LOCKED,
		&hostid, 0, "");
1815
1816STATIC int
1817sysctl_hostname
1818(__unused struct sysctl_oid *oidp, __unused void *arg1, __unused int arg2, struct sysctl_req *req)
1819{
1820	int error, changed;
1821	error = sysctl_io_string(req, hostname, sizeof(hostname), 1, &changed);
1822	if (changed) {
1823		hostnamelen = req->newlen;
1824	}
1825	return(error);
1826}
1827
1828
/* kern.hostname -- guarded by sysctl_hostname */
SYSCTL_PROC(_kern, KERN_HOSTNAME, hostname,
		CTLTYPE_STRING | CTLFLAG_RW | CTLFLAG_LOCKED,
		0, 0, sysctl_hostname, "A", "");
1832
/*
 * kern.procname: read or write the calling process's p_name buffer.
 */
STATIC int
sysctl_procname
(__unused struct sysctl_oid *oidp, __unused void *arg1, __unused int arg2, struct sysctl_req *req)
{
	/* Original code allowed writing, I'm copying this, although this all makes
	   no sense to me. Besides, this sysctl is never used. */
	return sysctl_io_string(req, &req->p->p_name[0], (2*MAXCOMLEN+1), 1, NULL);
}
1841
/* kern.procname -- writable by any process (CTLFLAG_ANYBODY) */
SYSCTL_PROC(_kern, KERN_PROCNAME, procname,
		CTLTYPE_STRING | CTLFLAG_RW | CTLFLAG_ANYBODY | CTLFLAG_LOCKED,
		0, 0, sysctl_procname, "A", "");
1845
/* Speculative-I/O and VM paging tunables exported under kern.* */
SYSCTL_INT(_kern, KERN_SPECULATIVE_READS, speculative_reads_disabled,
		CTLFLAG_RW | CTLFLAG_KERN | CTLFLAG_LOCKED,
		&speculative_reads_disabled, 0, "");

SYSCTL_INT(_kern, OID_AUTO, ignore_is_ssd,
		CTLFLAG_RW | CTLFLAG_KERN | CTLFLAG_LOCKED,
		&ignore_is_ssd, 0, "");

SYSCTL_UINT(_kern, OID_AUTO, preheat_max_bytes,
		CTLFLAG_RW | CTLFLAG_KERN | CTLFLAG_LOCKED,
		&preheat_max_bytes, 0, "");

SYSCTL_UINT(_kern, OID_AUTO, preheat_min_bytes,
		CTLFLAG_RW | CTLFLAG_KERN | CTLFLAG_LOCKED,
		&preheat_min_bytes, 0, "");

SYSCTL_UINT(_kern, OID_AUTO, speculative_prefetch_max,
		CTLFLAG_RW | CTLFLAG_KERN | CTLFLAG_LOCKED,
		&speculative_prefetch_max, 0, "");

SYSCTL_UINT(_kern, OID_AUTO, speculative_prefetch_max_iosize,
		CTLFLAG_RW | CTLFLAG_KERN | CTLFLAG_LOCKED,
		&speculative_prefetch_max_iosize, 0, "");

SYSCTL_UINT(_kern, OID_AUTO, vm_page_free_target,
		CTLFLAG_RW | CTLFLAG_KERN | CTLFLAG_LOCKED,
		&vm_page_free_target, 0, "");

SYSCTL_UINT(_kern, OID_AUTO, vm_page_free_min,
		CTLFLAG_RW | CTLFLAG_KERN | CTLFLAG_LOCKED,
		&vm_page_free_min, 0, "");

SYSCTL_UINT(_kern, OID_AUTO, vm_page_free_reserved,
		CTLFLAG_RW | CTLFLAG_KERN | CTLFLAG_LOCKED,
		&vm_page_free_reserved, 0, "");

SYSCTL_UINT(_kern, OID_AUTO, vm_page_speculative_percentage,
		CTLFLAG_RW | CTLFLAG_KERN | CTLFLAG_LOCKED,
		&vm_page_speculative_percentage, 0, "");

SYSCTL_UINT(_kern, OID_AUTO, vm_page_speculative_q_age_ms,
		CTLFLAG_RW | CTLFLAG_KERN | CTLFLAG_LOCKED,
		&vm_page_speculative_q_age_ms, 0, "");

SYSCTL_UINT(_kern, OID_AUTO, vm_max_delayed_work_limit,
		CTLFLAG_RW | CTLFLAG_KERN | CTLFLAG_LOCKED,
		&vm_max_delayed_work_limit, 0, "");

SYSCTL_UINT(_kern, OID_AUTO, vm_max_batch,
		CTLFLAG_RW | CTLFLAG_KERN | CTLFLAG_LOCKED,
		&vm_max_batch, 0, "");

SYSCTL_STRING(_kern, OID_AUTO, bootsessionuuid,
		CTLFLAG_RD | CTLFLAG_LOCKED,
		&bootsessionuuid_string, sizeof(bootsessionuuid_string) , "");
1901
1902STATIC int
1903sysctl_boottime
1904(__unused struct sysctl_oid *oidp, __unused void *arg1, __unused int arg2, struct sysctl_req *req)
1905{
1906	time_t tv_sec = boottime_sec();
1907	struct proc *p = req->p;
1908
1909	if (proc_is64bit(p)) {
1910		struct user64_timeval t;
1911		t.tv_sec = tv_sec;
1912		t.tv_usec = 0;
1913		return sysctl_io_opaque(req, &t, sizeof(t), NULL);
1914	} else {
1915		struct user32_timeval t;
1916		t.tv_sec = tv_sec;
1917		t.tv_usec = 0;
1918		return sysctl_io_opaque(req, &t, sizeof(t), NULL);
1919	}
1920}
1921
/* kern.boottime -- struct timeval of system boot */
SYSCTL_PROC(_kern, KERN_BOOTTIME, boottime,
		CTLTYPE_STRUCT | CTLFLAG_KERN | CTLFLAG_RD | CTLFLAG_LOCKED,
		0, 0, sysctl_boottime, "S,timeval", "");
1925
1926STATIC int
1927sysctl_symfile
1928(__unused struct sysctl_oid *oidp, __unused void *arg1, __unused int arg2, struct sysctl_req *req)
1929{
1930	char *str;
1931	int error = get_kernel_symfile(req->p, &str);
1932	if (error)
1933		return (error);
1934	return sysctl_io_string(req, str, 0, 0, NULL);
1935}
1936
1937
/* kern.symfile -- path to the kernel symbol file */
SYSCTL_PROC(_kern, KERN_SYMFILE, symfile,
		CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_LOCKED,
		0, 0, sysctl_symfile, "A", "");
1941
1942#if NFSCLIENT
/*
 * kern.netboot: report whatever netboot_root() returns
 * (non-zero when the root filesystem came over the network).
 */
STATIC int
sysctl_netboot
(__unused struct sysctl_oid *oidp, __unused void *arg1, __unused int arg2, struct sysctl_req *req)
{
	return sysctl_io_number(req, netboot_root(), sizeof(int), NULL, NULL);
}
1949
1950SYSCTL_PROC(_kern, KERN_NETBOOT, netboot,
1951		CTLTYPE_INT | CTLFLAG_RD | CTLFLAG_LOCKED,
1952		0, 0, sysctl_netboot, "I", "");
1953#endif
1954
1955#ifdef CONFIG_IMGSRC_ACCESS
1956/*
1957 * Legacy--act as if only one layer of nesting is possible.
1958 */
1959STATIC int
1960sysctl_imgsrcdev
1961(__unused struct sysctl_oid *oidp, __unused void *arg1, __unused int arg2, struct sysctl_req *req)
1962{
1963	vfs_context_t ctx = vfs_context_current();
1964	vnode_t devvp;
1965	int result;
1966
1967	if (!vfs_context_issuser(ctx)) {
1968		return EPERM;
1969	}
1970
1971	if (imgsrc_rootvnodes[0] == NULL) {
1972		return ENOENT;
1973	}
1974
1975	result = vnode_getwithref(imgsrc_rootvnodes[0]);
1976	if (result != 0) {
1977		return result;
1978	}
1979
1980	devvp = vnode_mount(imgsrc_rootvnodes[0])->mnt_devvp;
1981	result = vnode_getwithref(devvp);
1982	if (result != 0) {
1983		goto out;
1984	}
1985
1986	result = sysctl_io_number(req, vnode_specrdev(devvp), sizeof(dev_t), NULL, NULL);
1987
1988	vnode_put(devvp);
1989out:
1990	vnode_put(imgsrc_rootvnodes[0]);
1991	return result;
1992}
1993
1994SYSCTL_PROC(_kern, OID_AUTO, imgsrcdev,
1995		CTLTYPE_INT | CTLFLAG_RD | CTLFLAG_LOCKED,
1996		0, 0, sysctl_imgsrcdev, "I", "");
1997
/*
 * kern.imgsrcinfo: emit one imgsrc_info record per image-boot nesting level
 * (device, flags, height).  Stops at the first NULL root vnode; only the
 * populated prefix of the array is copied out.
 */
STATIC int
sysctl_imgsrcinfo
(__unused struct sysctl_oid *oidp, __unused void *arg1, __unused int arg2, struct sysctl_req *req)
{
	int error;
	struct imgsrc_info info[MAX_IMAGEBOOT_NESTING];	/* 2 for now, no problem */
	uint32_t i;
	vnode_t rvp, devvp;

	/* No image boot in effect at all. */
	if (imgsrc_rootvnodes[0] == NULLVP) {
		return ENXIO;
	}

	for (i = 0; i < MAX_IMAGEBOOT_NESTING; i++) {
		/*
		 * Go get the root vnode.
		 */
		rvp = imgsrc_rootvnodes[i];
		if (rvp == NULLVP) {
			break;
		}

		/* Iocount on the root vnode for the duration of this iteration. */
		error = vnode_get(rvp);
		if (error != 0) {
			return error;
		}

		/*
		 * For now, no getting at a non-local volume.
		 */
		devvp = vnode_mount(rvp)->mnt_devvp;
		if (devvp == NULL) {
			vnode_put(rvp);
			return EINVAL;
		}

		error = vnode_getwithref(devvp);
		if (error != 0) {
			vnode_put(rvp);
			return error;
		}

		/*
		 * Fill in info.
		 */
		info[i].ii_dev = vnode_specrdev(devvp);
		info[i].ii_flags = 0;
		info[i].ii_height = i;
		bzero(info[i].ii_reserved, sizeof(info[i].ii_reserved));

		vnode_put(devvp);
		vnode_put(rvp);
	}

	/* 'i' is the number of levels actually populated. */
	return sysctl_io_opaque(req, info, i * sizeof(info[0]), NULL);
}

SYSCTL_PROC(_kern, OID_AUTO, imgsrcinfo,
		CTLTYPE_OPAQUE | CTLFLAG_RD | CTLFLAG_LOCKED,
		0, 0, sysctl_imgsrcinfo, "I", "");
2058
2059#endif /* CONFIG_IMGSRC_ACCESS */
2060
2061
/* kern.timer node: timer subsystem controls. */
SYSCTL_DECL(_kern_timer);
SYSCTL_NODE(_kern, OID_AUTO, timer, CTLFLAG_RW | CTLFLAG_LOCKED, 0, "timer");


/* Global on/off switch for Mach timer coalescing. */
SYSCTL_INT(_kern_timer, OID_AUTO, coalescing_enabled,
		CTLFLAG_KERN | CTLFLAG_RW | CTLFLAG_LOCKED,
		&mach_timer_coalescing_enabled, 0, "");

/* Deadline-tracking histogram bin boundaries (absolute time units). */
SYSCTL_QUAD(_kern_timer, OID_AUTO, deadline_tracking_bin_1,
		CTLFLAG_RW | CTLFLAG_LOCKED,
		&timer_deadline_tracking_bin_1, "");
SYSCTL_QUAD(_kern_timer, OID_AUTO, deadline_tracking_bin_2,
		CTLFLAG_RW | CTLFLAG_LOCKED,
		&timer_deadline_tracking_bin_2, "");

/* kern.timer.longterm node: long-term timer queue controls/statistics. */
SYSCTL_DECL(_kern_timer_longterm);
SYSCTL_NODE(_kern_timer, OID_AUTO, longterm, CTLFLAG_RW | CTLFLAG_LOCKED, 0, "longterm");
2079
2080
/* Must match definition in osfmk/kern/timer_call.c */
/* Selector values passed through arg1 of sysctl_timer below. */
enum {
	THRESHOLD, QCOUNT,
	ENQUEUES, DEQUEUES, ESCALATES, SCANS, PREEMPTS,
	LATENCY, LATENCY_MIN, LATENCY_MAX
};
/* Accessors implemented in osfmk; selector is one of the enum values above. */
extern uint64_t	timer_sysctl_get(int);
extern int      timer_sysctl_set(int, uint64_t);
2089
2090STATIC int
2091sysctl_timer
2092(__unused struct sysctl_oid *oidp, __unused void *arg1, __unused int arg2, struct sysctl_req *req)
2093{
2094	int		oid = (int)arg1;
2095	uint64_t	value = timer_sysctl_get(oid);
2096	uint64_t	new_value;
2097	int		error;
2098	int		changed;
2099
2100	error = sysctl_io_number(req, value, sizeof(value), &new_value, &changed);
2101	if (changed)
2102		error = timer_sysctl_set(oid, new_value);
2103
2104	return error;
2105}
2106
/* Threshold for a timer to be queued long-term (RW); queue length (RD). */
SYSCTL_PROC(_kern_timer_longterm, OID_AUTO, threshold,
		CTLTYPE_QUAD | CTLFLAG_RW | CTLFLAG_LOCKED,
		(void *) THRESHOLD, 0, sysctl_timer, "Q", "");
SYSCTL_PROC(_kern_timer_longterm, OID_AUTO, qlen,
		CTLTYPE_QUAD | CTLFLAG_RD | CTLFLAG_LOCKED,
		(void *) QCOUNT, 0, sysctl_timer, "Q", "");
#if DEBUG
/* Additional long-term timer statistics, exposed on DEBUG kernels only. */
SYSCTL_PROC(_kern_timer_longterm, OID_AUTO, enqueues,
		CTLTYPE_QUAD | CTLFLAG_RD | CTLFLAG_LOCKED,
		(void *) ENQUEUES, 0, sysctl_timer, "Q", "");
SYSCTL_PROC(_kern_timer_longterm, OID_AUTO, dequeues,
		CTLTYPE_QUAD | CTLFLAG_RD | CTLFLAG_LOCKED,
		(void *) DEQUEUES, 0, sysctl_timer, "Q", "");
SYSCTL_PROC(_kern_timer_longterm, OID_AUTO, escalates,
		CTLTYPE_QUAD | CTLFLAG_RD | CTLFLAG_LOCKED,
		(void *) ESCALATES, 0, sysctl_timer, "Q", "");
SYSCTL_PROC(_kern_timer_longterm, OID_AUTO, scans,
		CTLTYPE_QUAD | CTLFLAG_RD | CTLFLAG_LOCKED,
		(void *) SCANS, 0, sysctl_timer, "Q", "");
SYSCTL_PROC(_kern_timer_longterm, OID_AUTO, preempts,
		CTLTYPE_QUAD | CTLFLAG_RD | CTLFLAG_LOCKED,
		(void *) PREEMPTS, 0, sysctl_timer, "Q", "");
SYSCTL_PROC(_kern_timer_longterm, OID_AUTO, latency,
		CTLTYPE_QUAD | CTLFLAG_RD | CTLFLAG_LOCKED,
		(void *) LATENCY, 0, sysctl_timer, "Q", "");
SYSCTL_PROC(_kern_timer_longterm, OID_AUTO, latency_min,
		CTLTYPE_QUAD | CTLFLAG_RD | CTLFLAG_LOCKED,
		(void *) LATENCY_MIN, 0, sysctl_timer, "Q", "");
SYSCTL_PROC(_kern_timer_longterm, OID_AUTO, latency_max,
		CTLTYPE_QUAD | CTLFLAG_RD | CTLFLAG_LOCKED,
		(void *) LATENCY_MAX, 0, sysctl_timer, "Q", "");
#endif /* DEBUG */
2139
/*
 * kern.usrstack: legacy 32-bit view of the calling process's user stack
 * address.  The (int) cast deliberately truncates for the 32-bit interface;
 * 64-bit callers should use kern.usrstack64 below.
 */
STATIC int
sysctl_usrstack
(__unused struct sysctl_oid *oidp, __unused void *arg1, __unused int arg2, struct sysctl_req *req)
{
	return sysctl_io_number(req, (int)req->p->user_stack, sizeof(int), NULL, NULL);
}

SYSCTL_PROC(_kern, KERN_USRSTACK32, usrstack,
		CTLTYPE_INT | CTLFLAG_RD | CTLFLAG_LOCKED,
		0, 0, sysctl_usrstack, "I", "");
2150
2151STATIC int
2152sysctl_usrstack64
2153(__unused struct sysctl_oid *oidp, __unused void *arg1, __unused int arg2, struct sysctl_req *req)
2154{
2155	return sysctl_io_number(req, req->p->user_stack, sizeof(req->p->user_stack), NULL, NULL);
2156}
2157
2158SYSCTL_PROC(_kern, KERN_USRSTACK64, usrstack64,
2159		CTLTYPE_QUAD | CTLFLAG_RD | CTLFLAG_LOCKED,
2160		0, 0, sysctl_usrstack64, "Q", "");
2161
/* kern.corefile: template path for core dump files. */
SYSCTL_STRING(_kern, KERN_COREFILE, corefile,
		CTLFLAG_RW | CTLFLAG_KERN | CTLFLAG_LOCKED,
		corefilename, sizeof(corefilename), "");
2165
2166STATIC int
2167sysctl_coredump
2168(__unused struct sysctl_oid *oidp, __unused void *arg1, __unused int arg2, struct sysctl_req *req)
2169{
2170#ifdef SECURE_KERNEL
2171	(void)req;
2172	return (ENOTSUP);
2173#else
2174	int new_value, changed;
2175	int error = sysctl_io_number(req, do_coredump, sizeof(int), &new_value, &changed);
2176	if (changed) {
2177		if ((new_value == 0) || (new_value == 1))
2178			do_coredump = new_value;
2179		else
2180			error = EINVAL;
2181	}
2182	return(error);
2183#endif
2184}
2185
2186SYSCTL_PROC(_kern, KERN_COREDUMP, coredump,
2187		CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_LOCKED,
2188		0, 0, sysctl_coredump, "I", "");
2189
2190STATIC int
2191sysctl_suid_coredump
2192(__unused struct sysctl_oid *oidp, __unused void *arg1, __unused int arg2, struct sysctl_req *req)
2193{
2194#ifdef SECURE_KERNEL
2195	(void)req;
2196	return (ENOTSUP);
2197#else
2198	int new_value, changed;
2199	int error = sysctl_io_number(req, sugid_coredump, sizeof(int), &new_value, &changed);
2200	if (changed) {
2201		if ((new_value == 0) || (new_value == 1))
2202			sugid_coredump = new_value;
2203		else
2204			error = EINVAL;
2205	}
2206	return(error);
2207#endif
2208}
2209
2210SYSCTL_PROC(_kern, KERN_SUGID_COREDUMP, sugid_coredump,
2211		CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_LOCKED,
2212		0, 0, sysctl_suid_coredump, "I", "");
2213
2214STATIC int
2215sysctl_delayterm
2216(__unused struct sysctl_oid *oidp, __unused void *arg1, __unused int arg2, struct sysctl_req *req)
2217{
2218	struct proc *p = req->p;
2219	int new_value, changed;
2220	int error = sysctl_io_number(req, (req->p->p_lflag & P_LDELAYTERM)? 1: 0, sizeof(int), &new_value, &changed);
2221	if (changed) {
2222		proc_lock(p);
2223		if (new_value)
2224			req->p->p_lflag |=  P_LDELAYTERM;
2225		else
2226			req->p->p_lflag &=  ~P_LDELAYTERM;
2227		proc_unlock(p);
2228	}
2229	return(error);
2230}
2231
2232SYSCTL_PROC(_kern, KERN_PROCDELAYTERM, delayterm,
2233		CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_LOCKED,
2234		0, 0, sysctl_delayterm, "I", "");
2235
2236
2237STATIC int
2238sysctl_rage_vnode
2239(__unused struct sysctl_oid *oidp, __unused void *arg1, __unused int arg2, struct sysctl_req *req)
2240{
2241	struct proc *p = req->p;
2242        struct  uthread *ut;
2243	int new_value, old_value, changed;
2244	int error;
2245
2246	ut = get_bsdthread_info(current_thread());
2247
2248	if (ut->uu_flag & UT_RAGE_VNODES)
2249	        old_value = KERN_RAGE_THREAD;
2250	else if (p->p_lflag & P_LRAGE_VNODES)
2251	        old_value = KERN_RAGE_PROC;
2252	else
2253	        old_value = 0;
2254
2255	error = sysctl_io_number(req, old_value, sizeof(int), &new_value, &changed);
2256
2257	if (error == 0) {
2258	        switch (new_value) {
2259		case KERN_RAGE_PROC:
2260		        proc_lock(p);
2261			p->p_lflag |= P_LRAGE_VNODES;
2262			proc_unlock(p);
2263			break;
2264		case KERN_UNRAGE_PROC:
2265		        proc_lock(p);
2266			p->p_lflag &= ~P_LRAGE_VNODES;
2267			proc_unlock(p);
2268			break;
2269
2270		case KERN_RAGE_THREAD:
2271			ut->uu_flag |= UT_RAGE_VNODES;
2272			break;
2273		case KERN_UNRAGE_THREAD:
2274		        ut = get_bsdthread_info(current_thread());
2275			ut->uu_flag &= ~UT_RAGE_VNODES;
2276			break;
2277		}
2278	}
2279	return(error);
2280}
2281
2282SYSCTL_PROC(_kern, KERN_RAGEVNODE, rage_vnode,
2283		CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_ANYBODY | CTLFLAG_LOCKED,
2284		0, 0, sysctl_rage_vnode, "I", "");
2285
/* XXX move this interface into libproc and remove this sysctl */
/*
 * kern.setthread_cpupercent: write-only control applying a CPU usage limit
 * to the calling thread.  Encoding: low 8 bits = percentage (0..100),
 * upper 24 bits = refill period in milliseconds.  Percentage 0 removes any
 * existing limit.
 */
STATIC int
sysctl_setthread_cpupercent
(__unused struct sysctl_oid *oidp, __unused void *arg1, __unused int arg2, struct sysctl_req *req)
{
	int new_value, old_value;
	int error = 0;
	kern_return_t kret = KERN_SUCCESS;
	uint8_t percent = 0;
	int ms_refill = 0;

	/* Write-only: a pure read is a successful no-op. */
	if (!req->newptr)
		return (0);

	old_value = 0;

	if ((error = sysctl_io_number(req, old_value, sizeof(old_value), &new_value, NULL)) != 0)
		return (error);

	percent = new_value & 0xff;			/* low 8 bits are the percentage */
	ms_refill = (new_value >> 8) & 0xffffff;	/* upper 24 bits are the ms refill value */
	if (percent > 100)
		return (EINVAL);

	/*
	 * If the caller is specifying a percentage of 0, this will unset the CPU limit, if present.
	 */
	if ((kret = thread_set_cpulimit(THREAD_CPULIMIT_BLOCK, percent, ms_refill * (int)NSEC_PER_MSEC)) != 0)
		return (EIO);

	return (0);
}

SYSCTL_PROC(_kern, OID_AUTO, setthread_cpupercent,
		CTLTYPE_INT | CTLFLAG_WR | CTLFLAG_ANYBODY,
		0, 0, sysctl_setthread_cpupercent, "I", "set thread cpu percentage limit");
2322
2323
/*
 * kern.check_openevt: query/set the per-process P_CHECKOPENEVT flag.
 * Writes take KERN_OPENEVT_PROC / KERN_UNOPENEVT_PROC; anything else is
 * EINVAL.  Flag updates use atomic bit ops, so no proc lock is needed.
 *
 * NOTE(review): the switch below runs even for read-only requests, where
 * sysctl_io_number does not write new_value — looks like an uninitialized
 * read on pure queries; confirm against sysctl_io_number's contract.
 */
STATIC int
sysctl_kern_check_openevt
(__unused struct sysctl_oid *oidp, __unused void *arg1, __unused int arg2, struct sysctl_req *req)
{
	struct proc *p = req->p;
	int new_value, old_value, changed;
	int error;

	if (p->p_flag & P_CHECKOPENEVT) {
		old_value = KERN_OPENEVT_PROC;
	} else {
	        old_value = 0;
	}

	error = sysctl_io_number(req, old_value, sizeof(int), &new_value, &changed);

	if (error == 0) {
	        switch (new_value) {
		case KERN_OPENEVT_PROC:
			OSBitOrAtomic(P_CHECKOPENEVT, &p->p_flag);
			break;

		case KERN_UNOPENEVT_PROC:
			OSBitAndAtomic(~((uint32_t)P_CHECKOPENEVT), &p->p_flag);
			break;

		default:
			error = EINVAL;
		}
	}
	return(error);
}

SYSCTL_PROC(_kern, KERN_CHECKOPENEVT, check_openevt, CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_ANYBODY | CTLFLAG_LOCKED,
            0, 0, sysctl_kern_check_openevt, "I", "set the per-process check-open-evt flag");
2359
2360
2361
/*
 * kern.nx: query/set whether NX (no-execute) page protection is enabled.
 * On SECURE_KERNEL builds the knob is unavailable; on x86 a write is
 * rejected unless the CPU advertises the XD feature bit.
 */
STATIC int
sysctl_nx
(__unused struct sysctl_oid *oidp, __unused void *arg1, __unused int arg2, struct sysctl_req *req)
{
#ifdef SECURE_KERNEL
	(void)req;
	return ENOTSUP;
#else
	int new_value, changed;
	int error;

	error = sysctl_io_number(req, nx_enabled, sizeof(nx_enabled), &new_value, &changed);
	if (error)
		return error;

	if (changed) {
#if defined(__i386__) || defined(__x86_64__)
		/*
		 * Only allow setting if NX is supported on the chip
		 */
		if (!(cpuid_extfeatures() & CPUID_EXTFEATURE_XD))
			return ENOTSUP;
#endif
		nx_enabled = new_value;
	}
	return(error);
#endif /* SECURE_KERNEL */
}



SYSCTL_PROC(_kern, KERN_NX_PROTECTION, nx,
		CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_KERN | CTLFLAG_LOCKED,
		0, 0, sysctl_nx, "I", "");
2396
2397STATIC int
2398sysctl_loadavg
2399(__unused struct sysctl_oid *oidp, __unused void *arg1, __unused int arg2, struct sysctl_req *req)
2400{
2401		if (proc_is64bit(req->p)) {
2402			struct user64_loadavg loadinfo64;
2403			fill_loadavg64(&averunnable, &loadinfo64);
2404			return sysctl_io_opaque(req, &loadinfo64, sizeof(loadinfo64), NULL);
2405		} else {
2406			struct user32_loadavg loadinfo32;
2407			fill_loadavg32(&averunnable, &loadinfo32);
2408			return sysctl_io_opaque(req, &loadinfo32, sizeof(loadinfo32), NULL);
2409		}
2410}
2411
2412SYSCTL_PROC(_vm, VM_LOADAVG, loadavg,
2413		CTLTYPE_STRUCT | CTLFLAG_RD | CTLFLAG_LOCKED,
2414		0, 0, sysctl_loadavg, "S,loadavg", "");
2415
2416/*
2417 * Note:	Thread safe; vm_map_lock protects in  vm_toggle_entry_reuse()
2418 */
2419STATIC int
2420sysctl_vm_toggle_address_reuse(__unused struct sysctl_oid *oidp, __unused void *arg1,
2421	      __unused int arg2, struct sysctl_req *req)
2422{
2423	int old_value=0, new_value=0, error=0;
2424
2425	if(vm_toggle_entry_reuse( VM_TOGGLE_GETVALUE, &old_value ))
2426		return(error);
2427	error = sysctl_io_number(req, old_value, sizeof(int), &new_value, NULL);
2428	if (!error) {
2429		return (vm_toggle_entry_reuse(new_value, NULL));
2430	}
2431	return(error);
2432}
2433
2434SYSCTL_PROC(_debug, OID_AUTO, toggle_address_reuse, CTLFLAG_ANYBODY | CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_LOCKED, 0, 0, sysctl_vm_toggle_address_reuse,"I","");
2435
2436STATIC int
2437sysctl_swapusage
2438(__unused struct sysctl_oid *oidp, __unused void *arg1, __unused int arg2, struct sysctl_req *req)
2439{
2440		int			error;
2441		uint64_t		swap_total;
2442		uint64_t		swap_avail;
2443		vm_size_t		swap_pagesize;
2444		boolean_t		swap_encrypted;
2445		struct xsw_usage	xsu;
2446
2447		error = macx_swapinfo(&swap_total,
2448				      &swap_avail,
2449				      &swap_pagesize,
2450				      &swap_encrypted);
2451		if (error)
2452			return error;
2453
2454		xsu.xsu_total = swap_total;
2455		xsu.xsu_avail = swap_avail;
2456		xsu.xsu_used = swap_total - swap_avail;
2457		xsu.xsu_pagesize = swap_pagesize;
2458		xsu.xsu_encrypted = swap_encrypted;
2459		return sysctl_io_opaque(req, &xsu, sizeof(xsu), NULL);
2460}
2461
2462
2463
2464SYSCTL_PROC(_vm, VM_SWAPUSAGE, swapusage,
2465		CTLTYPE_STRUCT | CTLFLAG_RD | CTLFLAG_LOCKED,
2466		0, 0, sysctl_swapusage, "S,xsw_usage", "");
2467
#if CONFIG_FREEZE
extern void vm_page_reactivate_all_throttled(void);

/*
 * vm.freeze_enabled: toggle memorystatus freezing.  Rejected when a
 * compressed pager is active; when freezing is turned off, previously
 * throttled dirty pages are moved back to the active queue.
 */
static int
sysctl_freeze_enabled SYSCTL_HANDLER_ARGS
{
#pragma unused(arg1, arg2)
	int error, val = memorystatus_freeze_enabled ? 1 : 0;
	boolean_t disabled;

	error = sysctl_handle_int(oidp, &val, 0, req);
	if (error || !req->newptr)
 		return (error);

	/* Freezing is incompatible with an active compressed pager. */
	if (COMPRESSED_PAGER_IS_ACTIVE || DEFAULT_FREEZER_COMPRESSED_PAGER_IS_ACTIVE) {
		//assert(req->newptr);
		printf("Failed attempt to set vm.freeze_enabled sysctl\n");
		return EINVAL;
	}

	/*
	 * If freeze is being disabled, we need to move dirty pages out from the throttle to the active queue.
	 */
	disabled = (!val && memorystatus_freeze_enabled);

	memorystatus_freeze_enabled = val ? TRUE : FALSE;

	if (disabled) {
		vm_page_reactivate_all_throttled();
	}

	return (0);
}

SYSCTL_PROC(_vm, OID_AUTO, freeze_enabled, CTLTYPE_INT|CTLFLAG_RW, &memorystatus_freeze_enabled, 0, sysctl_freeze_enabled, "I", "");
#endif /* CONFIG_FREEZE */
2504
/* this kernel does NOT implement shared_region_make_private_np() */
/* Always reads as 0 (NULL backing pointer): feature not supported. */
SYSCTL_INT(_kern, KERN_SHREG_PRIVATIZABLE, shreg_private,
		CTLFLAG_RD | CTLFLAG_LOCKED,
		(int *)NULL, 0, "");
2509
2510STATIC int
2511fetch_process_cputype(
2512	proc_t cur_proc,
2513	int *name,
2514	u_int namelen,
2515	cpu_type_t *cputype)
2516{
2517	proc_t p = PROC_NULL;
2518	int refheld = 0;
2519	cpu_type_t ret = 0;
2520	int error = 0;
2521
2522	if (namelen == 0)
2523		p = cur_proc;
2524	else if (namelen == 1) {
2525		p = proc_find(name[0]);
2526		if (p == NULL)
2527			return (EINVAL);
2528		refheld = 1;
2529	} else {
2530		error = EINVAL;
2531		goto out;
2532	}
2533
2534	ret = cpu_type() & ~CPU_ARCH_MASK;
2535	if (IS_64BIT_PROCESS(p))
2536		ret |= CPU_ARCH_ABI64;
2537
2538	*cputype = ret;
2539
2540	if (refheld != 0)
2541		proc_rele(p);
2542out:
2543	return (error);
2544}
2545
2546STATIC int
2547sysctl_sysctl_native(__unused struct sysctl_oid *oidp, void *arg1, int arg2,
2548		    struct sysctl_req *req)
2549{
2550	int error;
2551	cpu_type_t proc_cputype = 0;
2552	if ((error = fetch_process_cputype(req->p, (int *)arg1, arg2, &proc_cputype)) != 0)
2553		return error;
2554	int res = 1;
2555	if ((proc_cputype & ~CPU_ARCH_MASK) != (cpu_type() & ~CPU_ARCH_MASK))
2556		res = 0;
2557	return SYSCTL_OUT(req, &res, sizeof(res));
2558}
2559SYSCTL_PROC(_sysctl, OID_AUTO, proc_native, CTLTYPE_NODE|CTLFLAG_RD | CTLFLAG_LOCKED, 0, 0, sysctl_sysctl_native ,"I","proc_native");
2560
2561STATIC int
2562sysctl_sysctl_cputype(__unused struct sysctl_oid *oidp, void *arg1, int arg2,
2563		     struct sysctl_req *req)
2564{
2565	int error;
2566	cpu_type_t proc_cputype = 0;
2567	if ((error = fetch_process_cputype(req->p, (int *)arg1, arg2, &proc_cputype)) != 0)
2568		return error;
2569	return SYSCTL_OUT(req, &proc_cputype, sizeof(proc_cputype));
2570}
2571SYSCTL_PROC(_sysctl, OID_AUTO, proc_cputype, CTLTYPE_NODE|CTLFLAG_RD | CTLFLAG_LOCKED, 0, 0, sysctl_sysctl_cputype ,"I","proc_cputype");
2572
2573STATIC int
2574sysctl_safeboot
2575(__unused struct sysctl_oid *oidp, __unused void *arg1, __unused int arg2, struct sysctl_req *req)
2576{
2577	return sysctl_io_number(req, boothowto & RB_SAFEBOOT ? 1 : 0, sizeof(int), NULL, NULL);
2578}
2579
2580SYSCTL_PROC(_kern, KERN_SAFEBOOT, safeboot,
2581		CTLTYPE_INT | CTLFLAG_RD | CTLFLAG_LOCKED,
2582		0, 0, sysctl_safeboot, "I", "");
2583
2584STATIC int
2585sysctl_singleuser
2586(__unused struct sysctl_oid *oidp, __unused void *arg1, __unused int arg2, struct sysctl_req *req)
2587{
2588	return sysctl_io_number(req, boothowto & RB_SINGLE ? 1 : 0, sizeof(int), NULL, NULL);
2589}
2590
2591SYSCTL_PROC(_kern, OID_AUTO, singleuser,
2592		CTLTYPE_INT | CTLFLAG_RD | CTLFLAG_LOCKED,
2593		0, 0, sysctl_singleuser, "I", "");
2594
2595/*
2596 * Controls for debugging affinity sets - see osfmk/kern/affinity.c
2597 */
2598extern boolean_t	affinity_sets_enabled;
2599extern int		affinity_sets_mapping;
2600
2601SYSCTL_INT (_kern, OID_AUTO, affinity_sets_enabled,
2602	    CTLFLAG_RW | CTLFLAG_LOCKED, (int *) &affinity_sets_enabled, 0, "hinting enabled");
2603SYSCTL_INT (_kern, OID_AUTO, affinity_sets_mapping,
2604	    CTLFLAG_RW | CTLFLAG_LOCKED, &affinity_sets_mapping, 0, "mapping policy");
2605
2606/*
2607 * Boolean indicating if KASLR is active.
2608 */
2609STATIC int
2610sysctl_slide
2611(__unused struct sysctl_oid *oidp, __unused void *arg1, __unused int arg2, struct sysctl_req *req)
2612{
2613	uint32_t	slide;
2614
2615	slide = vm_kernel_slide ? 1 : 0;
2616
2617	return sysctl_io_number( req, slide, sizeof(int), NULL, NULL);
2618}
2619
2620SYSCTL_PROC(_kern, OID_AUTO, slide,
2621		CTLTYPE_INT | CTLFLAG_RD | CTLFLAG_LOCKED,
2622		0, 0, sysctl_slide, "I", "");
2623
2624/*
2625 * Limit on total memory users can wire.
2626 *
2627 * vm_global_user_wire_limit - system wide limit on wired memory from all processes combined.
2628 *
2629 * vm_user_wire_limit - per address space limit on wired memory.  This puts a cap on the process's rlimit value.
2630 *
2631 * These values are initialized to reasonable defaults at boot time based on the available physical memory in
2632 * kmem_init().
2633 *
2634 * All values are in bytes.
2635 */
2636
/* Wired-memory limits (bytes); defaults set in kmem_init() from physical memory. */
vm_map_size_t	vm_global_no_user_wire_amount;
vm_map_size_t	vm_global_user_wire_limit;
vm_map_size_t	vm_user_wire_limit;

/*
 * There needs to be a more automatic/elegant way to do this
 */
SYSCTL_QUAD(_vm, OID_AUTO, global_no_user_wire_amount, CTLFLAG_RW | CTLFLAG_LOCKED, &vm_global_no_user_wire_amount, "");
SYSCTL_QUAD(_vm, OID_AUTO, global_user_wire_limit, CTLFLAG_RW | CTLFLAG_LOCKED, &vm_global_user_wire_limit, "");
SYSCTL_QUAD(_vm, OID_AUTO, user_wire_limit, CTLFLAG_RW | CTLFLAG_LOCKED, &vm_user_wire_limit, "");
2647
/* Read-only counters from vm_map_copy_overwrite_aligned() diagnostics. */
extern int vm_map_copy_overwrite_aligned_src_not_internal;
extern int vm_map_copy_overwrite_aligned_src_not_symmetric;
extern int vm_map_copy_overwrite_aligned_src_large;
SYSCTL_INT(_vm, OID_AUTO, vm_copy_src_not_internal, CTLFLAG_RD | CTLFLAG_LOCKED, &vm_map_copy_overwrite_aligned_src_not_internal, 0, "");
SYSCTL_INT(_vm, OID_AUTO, vm_copy_src_not_symmetric, CTLFLAG_RD | CTLFLAG_LOCKED, &vm_map_copy_overwrite_aligned_src_not_symmetric, 0, "");
SYSCTL_INT(_vm, OID_AUTO, vm_copy_src_large, CTLFLAG_RD | CTLFLAG_LOCKED, &vm_map_copy_overwrite_aligned_src_large, 0, "");


/* File-backed page count (RD) and minimum filecache target (RW). */
extern uint32_t	vm_page_external_count;
extern uint32_t	vm_page_filecache_min;

SYSCTL_INT(_vm, OID_AUTO, vm_page_external_count, CTLFLAG_RD | CTLFLAG_LOCKED, &vm_page_external_count, 0, "");
SYSCTL_INT(_vm, OID_AUTO, vm_page_filecache_min, CTLFLAG_RW | CTLFLAG_LOCKED, &vm_page_filecache_min, 0, "");
2661
/* VM compressor state and thrashing-detection tunables (osfmk/vm). */
extern int	vm_compressor_mode;
extern uint32_t	swapout_target_age;
extern int64_t  compressor_bytes_used;
extern uint32_t	compressor_eval_period_in_msecs;
extern uint32_t	compressor_sample_min_in_msecs;
extern uint32_t	compressor_sample_max_in_msecs;
extern uint32_t	compressor_thrashing_threshold_per_10msecs;
extern uint32_t	compressor_thrashing_min_per_10msecs;
extern uint32_t	vm_compressor_minorcompact_threshold_divisor;
extern uint32_t	vm_compressor_majorcompact_threshold_divisor;
extern uint32_t	vm_compressor_unthrottle_threshold_divisor;
extern uint32_t	vm_compressor_catchup_threshold_divisor;

/* Read-only compressor state. */
SYSCTL_INT(_vm, OID_AUTO, compressor_mode, CTLFLAG_RD | CTLFLAG_LOCKED, &vm_compressor_mode, 0, "");
SYSCTL_QUAD(_vm, OID_AUTO, compressor_bytes_used, CTLFLAG_RD | CTLFLAG_LOCKED, &compressor_bytes_used, "");
SYSCTL_INT(_vm, OID_AUTO, compressor_swapout_target_age, CTLFLAG_RD | CTLFLAG_LOCKED, &swapout_target_age, 0, "");

/* Read-write compressor tuning knobs. */
SYSCTL_INT(_vm, OID_AUTO, compressor_eval_period_in_msecs, CTLFLAG_RW | CTLFLAG_LOCKED, &compressor_eval_period_in_msecs, 0, "");
SYSCTL_INT(_vm, OID_AUTO, compressor_sample_min_in_msecs, CTLFLAG_RW | CTLFLAG_LOCKED, &compressor_sample_min_in_msecs, 0, "");
SYSCTL_INT(_vm, OID_AUTO, compressor_sample_max_in_msecs, CTLFLAG_RW | CTLFLAG_LOCKED, &compressor_sample_max_in_msecs, 0, "");
SYSCTL_INT(_vm, OID_AUTO, compressor_thrashing_threshold_per_10msecs, CTLFLAG_RW | CTLFLAG_LOCKED, &compressor_thrashing_threshold_per_10msecs, 0, "");
SYSCTL_INT(_vm, OID_AUTO, compressor_thrashing_min_per_10msecs, CTLFLAG_RW | CTLFLAG_LOCKED, &compressor_thrashing_min_per_10msecs, 0, "");
SYSCTL_INT(_vm, OID_AUTO, compressor_minorcompact_threshold_divisor, CTLFLAG_RW | CTLFLAG_LOCKED, &vm_compressor_minorcompact_threshold_divisor, 0, "");
SYSCTL_INT(_vm, OID_AUTO, compressor_majorcompact_threshold_divisor, CTLFLAG_RW | CTLFLAG_LOCKED, &vm_compressor_majorcompact_threshold_divisor, 0, "");
SYSCTL_INT(_vm, OID_AUTO, compressor_unthrottle_threshold_divisor, CTLFLAG_RW | CTLFLAG_LOCKED, &vm_compressor_unthrottle_threshold_divisor, 0, "");
SYSCTL_INT(_vm, OID_AUTO, compressor_catchup_threshold_divisor, CTLFLAG_RW | CTLFLAG_LOCKED, &vm_compressor_catchup_threshold_divisor, 0, "");

/* Swapfile name prefix; the index suffix length is reserved out of the buffer. */
SYSCTL_STRING(_vm, OID_AUTO, swapfileprefix, CTLFLAG_RW | CTLFLAG_KERN | CTLFLAG_LOCKED, swapfilename, sizeof(swapfilename) - SWAPFILENAME_INDEX_LEN, "");
2690
#if CONFIG_PHANTOM_CACHE
/* Phantom-cache thrashing detection tunables (separate SSD threshold). */
extern uint32_t phantom_cache_thrashing_threshold;
extern uint32_t phantom_cache_eval_period_in_msecs;
extern uint32_t phantom_cache_thrashing_threshold_ssd;


SYSCTL_INT(_vm, OID_AUTO, phantom_cache_eval_period_in_msecs, CTLFLAG_RW | CTLFLAG_LOCKED, &phantom_cache_eval_period_in_msecs, 0, "");
SYSCTL_INT(_vm, OID_AUTO, phantom_cache_thrashing_threshold, CTLFLAG_RW | CTLFLAG_LOCKED, &phantom_cache_thrashing_threshold, 0, "");
SYSCTL_INT(_vm, OID_AUTO, phantom_cache_thrashing_threshold_ssd, CTLFLAG_RW | CTLFLAG_LOCKED, &phantom_cache_thrashing_threshold_ssd, 0, "");
#endif
2701
2702/*
2703 * Enable tracing of voucher contents
2704 */
2705extern uint32_t ipc_voucher_trace_contents;
2706
2707SYSCTL_INT (_kern, OID_AUTO, ipc_voucher_trace_contents,
2708	    CTLFLAG_RW | CTLFLAG_LOCKED, &ipc_voucher_trace_contents, 0, "Enable tracing voucher contents");
2709
2710/*
2711 * Kernel stack size and depth
2712 */
2713SYSCTL_INT (_kern, OID_AUTO, stack_size,
2714	    CTLFLAG_RD | CTLFLAG_LOCKED, (int *) &kernel_stack_size, 0, "Kernel stack size");
2715SYSCTL_INT (_kern, OID_AUTO, stack_depth_max,
2716	    CTLFLAG_RD | CTLFLAG_LOCKED, (int *) &kernel_stack_depth_max, 0, "Max kernel stack depth at interrupt or context switch");
2717
2718/*
2719 * enable back trace for port allocations
2720 */
2721extern int ipc_portbt;
2722
2723SYSCTL_INT(_kern, OID_AUTO, ipc_portbt,
2724		CTLFLAG_RW | CTLFLAG_KERN | CTLFLAG_LOCKED,
2725		&ipc_portbt, 0, "");
2726
2727/*
2728 * Scheduler sysctls
2729 */
2730
2731/*
2732 * See osfmk/kern/sched_prim.c for the corresponding definition
2733 * in osfmk/. If either version changes, update the other.
2734 */
2735#define SCHED_STRING_MAX_LENGTH (48)
2736
2737extern char sched_string[SCHED_STRING_MAX_LENGTH];
2738SYSCTL_STRING(_kern, OID_AUTO, sched,
2739			  CTLFLAG_RD | CTLFLAG_KERN | CTLFLAG_LOCKED,
2740			  sched_string, sizeof(sched_string),
2741			  "Timeshare scheduler implementation");
2742
2743/*
2744 * Only support runtime modification on embedded platforms
2745 * with development config enabled
2746 */
2747
2748
2749/* Parameters related to timer coalescing tuning, to be replaced
2750 * with a dedicated systemcall in the future.
2751 */
2752/* Enable processing pending timers in the context of any other interrupt
2753 * Coalescing tuning parameters for various thread/task attributes */
/*
 * Shared handler for timer-coalescing tunables: the backing variable
 * (arg1) is stored in absolute time units, but the sysctl presents it in
 * nanoseconds.  arg2 is sizeof the backing variable and selects whether
 * it is read/written as uint32_t or uint64_t.
 */
STATIC int
sysctl_timer_user_us_kernel_abstime SYSCTL_HANDLER_ARGS
{
#pragma unused(oidp)
	int size = arg2;	/* size in bytes of the abstime-valued backing variable */
	int error;
	int changed = 0;
	uint64_t old_value_ns;
	uint64_t new_value_ns;
	uint64_t value_abstime;
	/* Load the current abstime value at the declared width. */
	if (size == sizeof(uint32_t))
		value_abstime = *((uint32_t *)arg1);
	else if (size == sizeof(uint64_t))
		value_abstime = *((uint64_t *)arg1);
	else return ENOTSUP;

	/* Present to userspace in nanoseconds. */
	absolutetime_to_nanoseconds(value_abstime, &old_value_ns);
	error = sysctl_io_number(req, old_value_ns, sizeof(old_value_ns), &new_value_ns, &changed);
	if ((error) || (!changed))
		return error;

	/* Convert the written nanoseconds back to abstime and store at width. */
	nanoseconds_to_absolutetime(new_value_ns, &value_abstime);
	if (size == sizeof(uint32_t))
		*((uint32_t *)arg1) = (uint32_t)value_abstime;
	else
		*((uint64_t *)arg1) = value_abstime;
	return error;
}
2782
/*
 * Timer coalescing tunables, by thread class and latency QoS tier.
 * Each class gets a shift/"scale" (plain int) and a maximum coalescing
 * window (stored in abstime, presented in ns via
 * sysctl_timer_user_us_kernel_abstime).
 */
SYSCTL_INT(_kern, OID_AUTO, timer_coalesce_bg_scale,
    CTLFLAG_KERN | CTLFLAG_RW | CTLFLAG_LOCKED,
    &tcoal_prio_params.timer_coalesce_bg_shift, 0, "");
SYSCTL_PROC(_kern, OID_AUTO, timer_resort_threshold_ns,
    CTLTYPE_QUAD | CTLFLAG_KERN | CTLFLAG_RW | CTLFLAG_LOCKED,
    &tcoal_prio_params.timer_resort_threshold_abstime,
    sizeof(tcoal_prio_params.timer_resort_threshold_abstime),
    sysctl_timer_user_us_kernel_abstime,
    "Q", "");
SYSCTL_PROC(_kern, OID_AUTO, timer_coalesce_bg_ns_max,
    CTLTYPE_QUAD | CTLFLAG_KERN | CTLFLAG_RW | CTLFLAG_LOCKED,
    &tcoal_prio_params.timer_coalesce_bg_abstime_max,
    sizeof(tcoal_prio_params.timer_coalesce_bg_abstime_max),
    sysctl_timer_user_us_kernel_abstime,
    "Q", "");

/* Kernel threads. */
SYSCTL_INT(_kern, OID_AUTO, timer_coalesce_kt_scale,
    CTLFLAG_KERN | CTLFLAG_RW | CTLFLAG_LOCKED,
    &tcoal_prio_params.timer_coalesce_kt_shift, 0, "");

SYSCTL_PROC(_kern, OID_AUTO, timer_coalesce_kt_ns_max,
    CTLTYPE_QUAD | CTLFLAG_KERN | CTLFLAG_RW | CTLFLAG_LOCKED,
    &tcoal_prio_params.timer_coalesce_kt_abstime_max,
    sizeof(tcoal_prio_params.timer_coalesce_kt_abstime_max),
    sysctl_timer_user_us_kernel_abstime,
    "Q", "");

/* Fixed-priority threads. */
SYSCTL_INT(_kern, OID_AUTO, timer_coalesce_fp_scale,
    CTLFLAG_KERN | CTLFLAG_RW | CTLFLAG_LOCKED,
    &tcoal_prio_params.timer_coalesce_fp_shift, 0, "");

SYSCTL_PROC(_kern, OID_AUTO, timer_coalesce_fp_ns_max,
    CTLTYPE_QUAD | CTLFLAG_KERN | CTLFLAG_RW | CTLFLAG_LOCKED,
    &tcoal_prio_params.timer_coalesce_fp_abstime_max,
    sizeof(tcoal_prio_params.timer_coalesce_fp_abstime_max),
   sysctl_timer_user_us_kernel_abstime,
    "Q", "");

/* Timeshare threads. */
SYSCTL_INT(_kern, OID_AUTO, timer_coalesce_ts_scale,
    CTLFLAG_KERN | CTLFLAG_RW | CTLFLAG_LOCKED,
    &tcoal_prio_params.timer_coalesce_ts_shift, 0, "");

SYSCTL_PROC(_kern, OID_AUTO, timer_coalesce_ts_ns_max,
    CTLTYPE_QUAD | CTLFLAG_KERN | CTLFLAG_RW | CTLFLAG_LOCKED,
    &tcoal_prio_params.timer_coalesce_ts_abstime_max,
    sizeof(tcoal_prio_params.timer_coalesce_ts_abstime_max),
    sysctl_timer_user_us_kernel_abstime,
    "Q", "");

/* Latency QoS tiers 0-5. */
SYSCTL_INT(_kern, OID_AUTO, timer_coalesce_tier0_scale,
    CTLFLAG_KERN | CTLFLAG_RW | CTLFLAG_LOCKED,
    &tcoal_prio_params.latency_qos_scale[0], 0, "");

SYSCTL_PROC(_kern, OID_AUTO, timer_coalesce_tier0_ns_max,
    CTLTYPE_QUAD | CTLFLAG_KERN | CTLFLAG_RW | CTLFLAG_LOCKED,
    &tcoal_prio_params.latency_qos_abstime_max[0],
    sizeof(tcoal_prio_params.latency_qos_abstime_max[0]),
    sysctl_timer_user_us_kernel_abstime,
    "Q", "");

SYSCTL_INT(_kern, OID_AUTO, timer_coalesce_tier1_scale,
    CTLFLAG_KERN | CTLFLAG_RW | CTLFLAG_LOCKED,
    &tcoal_prio_params.latency_qos_scale[1], 0, "");

SYSCTL_PROC(_kern, OID_AUTO, timer_coalesce_tier1_ns_max,
    CTLTYPE_QUAD | CTLFLAG_KERN | CTLFLAG_RW | CTLFLAG_LOCKED,
    &tcoal_prio_params.latency_qos_abstime_max[1],
    sizeof(tcoal_prio_params.latency_qos_abstime_max[1]),
    sysctl_timer_user_us_kernel_abstime,
    "Q", "");

SYSCTL_INT(_kern, OID_AUTO, timer_coalesce_tier2_scale,
    CTLFLAG_KERN | CTLFLAG_RW | CTLFLAG_LOCKED,
    &tcoal_prio_params.latency_qos_scale[2], 0, "");

SYSCTL_PROC(_kern, OID_AUTO, timer_coalesce_tier2_ns_max,
    CTLTYPE_QUAD | CTLFLAG_KERN | CTLFLAG_RW | CTLFLAG_LOCKED,
    &tcoal_prio_params.latency_qos_abstime_max[2],
    sizeof(tcoal_prio_params.latency_qos_abstime_max[2]),
    sysctl_timer_user_us_kernel_abstime,
    "Q", "");

SYSCTL_INT(_kern, OID_AUTO, timer_coalesce_tier3_scale,
    CTLFLAG_KERN | CTLFLAG_RW | CTLFLAG_LOCKED,
    &tcoal_prio_params.latency_qos_scale[3], 0, "");

SYSCTL_PROC(_kern, OID_AUTO, timer_coalesce_tier3_ns_max,
    CTLTYPE_QUAD | CTLFLAG_KERN | CTLFLAG_RW | CTLFLAG_LOCKED,
    &tcoal_prio_params.latency_qos_abstime_max[3],
    sizeof(tcoal_prio_params.latency_qos_abstime_max[3]),
    sysctl_timer_user_us_kernel_abstime,
    "Q", "");

SYSCTL_INT(_kern, OID_AUTO, timer_coalesce_tier4_scale,
    CTLFLAG_KERN | CTLFLAG_RW | CTLFLAG_LOCKED,
    &tcoal_prio_params.latency_qos_scale[4], 0, "");

SYSCTL_PROC(_kern, OID_AUTO, timer_coalesce_tier4_ns_max,
    CTLTYPE_QUAD | CTLFLAG_KERN | CTLFLAG_RW | CTLFLAG_LOCKED,
    &tcoal_prio_params.latency_qos_abstime_max[4],
    sizeof(tcoal_prio_params.latency_qos_abstime_max[4]),
    sysctl_timer_user_us_kernel_abstime,
    "Q", "");

SYSCTL_INT(_kern, OID_AUTO, timer_coalesce_tier5_scale,
    CTLFLAG_KERN | CTLFLAG_RW | CTLFLAG_LOCKED,
    &tcoal_prio_params.latency_qos_scale[5], 0, "");

SYSCTL_PROC(_kern, OID_AUTO, timer_coalesce_tier5_ns_max,
    CTLTYPE_QUAD | CTLFLAG_KERN | CTLFLAG_RW | CTLFLAG_LOCKED,
    &tcoal_prio_params.latency_qos_abstime_max[5],
    sizeof(tcoal_prio_params.latency_qos_abstime_max[5]),
    sysctl_timer_user_us_kernel_abstime,
    "Q", "");
2897
2898/* Communicate the "user idle level" heuristic to the timer layer, and
2899 * potentially other layers in the future.
2900 */
2901
2902static int
2903timer_user_idle_level(__unused struct sysctl_oid *oidp, __unused void *arg1, __unused int arg2, struct sysctl_req *req) {
2904	int new_value = 0, old_value = 0, changed = 0, error;
2905
2906	old_value = timer_get_user_idle_level();
2907
2908	error = sysctl_io_number(req, old_value, sizeof(int), &new_value, &changed);
2909
2910	if (error == 0 && changed) {
2911		if (timer_set_user_idle_level(new_value) != KERN_SUCCESS)
2912			error = ERANGE;
2913	}
2914
2915	return error;
2916}
2917
/* machdep.user_idle_level: read/write OID backed by timer_user_idle_level(). */
SYSCTL_PROC(_machdep, OID_AUTO, user_idle_level,
    CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_LOCKED,
    0, 0,
    timer_user_idle_level, "I", "User idle level heuristic, 0-128");
2922
#if HYPERVISOR
/* kern.hv_support: read-only flag exposing hv_support_available to userspace. */
SYSCTL_INT(_kern, OID_AUTO, hv_support,
		CTLFLAG_KERN | CTLFLAG_RD | CTLFLAG_LOCKED,
		&hv_support_available, 0, "");
#endif
2928