kern_sysctl.c revision 286094
1/*-
2 * Copyright (c) 1982, 1986, 1989, 1993
3 *	The Regents of the University of California.  All rights reserved.
4 *
5 * This code is derived from software contributed to Berkeley by
6 * Mike Karels at Berkeley Software Design, Inc.
7 *
8 * Quite extensively rewritten by Poul-Henning Kamp of the FreeBSD
9 * project, to make these variables more userfriendly.
10 *
11 * Redistribution and use in source and binary forms, with or without
12 * modification, are permitted provided that the following conditions
13 * are met:
14 * 1. Redistributions of source code must retain the above copyright
15 *    notice, this list of conditions and the following disclaimer.
16 * 2. Redistributions in binary form must reproduce the above copyright
17 *    notice, this list of conditions and the following disclaimer in the
18 *    documentation and/or other materials provided with the distribution.
19 * 4. Neither the name of the University nor the names of its contributors
20 *    may be used to endorse or promote products derived from this software
21 *    without specific prior written permission.
22 *
23 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
24 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
25 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
26 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
27 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
28 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
29 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
30 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
31 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
32 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
33 * SUCH DAMAGE.
34 *
35 *	@(#)kern_sysctl.c	8.4 (Berkeley) 4/14/94
36 */
37
38#include <sys/cdefs.h>
39__FBSDID("$FreeBSD: head/sys/kern/kern_sysctl.c 286094 2015-07-30 19:52:43Z mjg $");
40
41#include "opt_capsicum.h"
42#include "opt_compat.h"
43#include "opt_ktrace.h"
44
45#include <sys/param.h>
46#include <sys/fail.h>
47#include <sys/systm.h>
48#include <sys/capsicum.h>
49#include <sys/kernel.h>
50#include <sys/sysctl.h>
51#include <sys/malloc.h>
52#include <sys/priv.h>
53#include <sys/proc.h>
54#include <sys/jail.h>
55#include <sys/lock.h>
56#include <sys/mutex.h>
57#include <sys/sbuf.h>
58#include <sys/sx.h>
59#include <sys/sysproto.h>
60#include <sys/uio.h>
61#ifdef KTRACE
62#include <sys/ktrace.h>
63#endif
64
65#include <net/vnet.h>
66
67#include <security/mac/mac_framework.h>
68
69#include <vm/vm.h>
70#include <vm/vm_extern.h>
71
72static MALLOC_DEFINE(M_SYSCTL, "sysctl", "sysctl internal magic");
73static MALLOC_DEFINE(M_SYSCTLOID, "sysctloid", "sysctl dynamic oids");
74static MALLOC_DEFINE(M_SYSCTLTMP, "sysctltmp", "sysctl temp output buffer");
75
76/*
77 * The sysctllock protects the MIB tree.  It also protects sysctl
78 * contexts used with dynamic sysctls.  The sysctl_register_oid() and
79 * sysctl_unregister_oid() routines require the sysctllock to already
80 * be held, so the sysctl_xlock() and sysctl_xunlock() routines are
81 * provided for the few places in the kernel which need to use that
82 * API rather than using the dynamic API.  Use of the dynamic API is
83 * strongly encouraged for most code.
84 *
85 * The sysctlmemlock is used to limit the amount of user memory wired for
86 * sysctl requests.  This is implemented by serializing any userland
87 * sysctl requests larger than a single page via an exclusive lock.
88 */
89static struct sx sysctllock;
90static struct sx sysctlmemlock;
91
92#define	SYSCTL_XLOCK()		sx_xlock(&sysctllock)
93#define	SYSCTL_XUNLOCK()	sx_xunlock(&sysctllock)
94#define	SYSCTL_SLOCK()		sx_slock(&sysctllock)
95#define	SYSCTL_SUNLOCK()	sx_sunlock(&sysctllock)
96#define	SYSCTL_XLOCKED()	sx_xlocked(&sysctllock)
97#define	SYSCTL_ASSERT_LOCKED()	sx_assert(&sysctllock, SA_LOCKED)
98#define	SYSCTL_ASSERT_XLOCKED()	sx_assert(&sysctllock, SA_XLOCKED)
99#define	SYSCTL_ASSERT_SLOCKED()	sx_assert(&sysctllock, SA_SLOCKED)
100#define	SYSCTL_INIT()		sx_init(&sysctllock, "sysctl lock")
101#define	SYSCTL_SLEEP(ch, wmesg, timo)					\
102				sx_sleep(ch, &sysctllock, 0, wmesg, timo)
103
104static int sysctl_root(SYSCTL_HANDLER_ARGS);
105
106/* Root list */
107struct sysctl_oid_list sysctl__children = SLIST_HEAD_INITIALIZER(&sysctl__children);
108
109static int	sysctl_remove_oid_locked(struct sysctl_oid *oidp, int del,
110		    int recurse);
111static int	sysctl_old_kernel(struct sysctl_req *, const void *, size_t);
112static int	sysctl_new_kernel(struct sysctl_req *, void *, size_t);
113
114static void
115sysctl_lock(bool xlock)
116{
117
118	if (xlock)
119		SYSCTL_XLOCK();
120	else
121		SYSCTL_SLOCK();
122}
123
124static bool
125sysctl_unlock(void)
126{
127	bool xlocked;
128
129	xlocked = SYSCTL_XLOCKED();
130	if (xlocked)
131		SYSCTL_XUNLOCK();
132	else
133		SYSCTL_SUNLOCK();
134	return (xlocked);
135}
136
137static struct sysctl_oid *
138sysctl_find_oidname(const char *name, struct sysctl_oid_list *list)
139{
140	struct sysctl_oid *oidp;
141
142	SYSCTL_ASSERT_LOCKED();
143	SLIST_FOREACH(oidp, list, oid_link) {
144		if (strcmp(oidp->oid_name, name) == 0) {
145			return (oidp);
146		}
147	}
148	return (NULL);
149}
150
151/*
152 * Initialization of the MIB tree.
153 *
154 * Order by number in each list.
155 */
156void
157sysctl_xlock(void)
158{
159
160	SYSCTL_XLOCK();
161}
162
163void
164sysctl_xunlock(void)
165{
166
167	SYSCTL_XUNLOCK();
168}
169
170static int
171sysctl_root_handler_locked(struct sysctl_oid *oid, void *arg1, intptr_t arg2,
172    struct sysctl_req *req)
173{
174	int error;
175	bool xlocked;
176
177	if (oid->oid_kind & CTLFLAG_DYN)
178		atomic_add_int(&oid->oid_running, 1);
179	xlocked = sysctl_unlock();
180
181	if (!(oid->oid_kind & CTLFLAG_MPSAFE))
182		mtx_lock(&Giant);
183	error = oid->oid_handler(oid, arg1, arg2, req);
184	if (!(oid->oid_kind & CTLFLAG_MPSAFE))
185		mtx_unlock(&Giant);
186
187	sysctl_lock(xlocked);
188	if (oid->oid_kind & CTLFLAG_DYN) {
189		if (atomic_fetchadd_int(&oid->oid_running, -1) == 1 &&
190		    (oid->oid_kind & CTLFLAG_DYING) != 0)
191			wakeup(&oid->oid_running);
192	}
193
194	return (error);
195}
196
197static void
198sysctl_load_tunable_by_oid_locked(struct sysctl_oid *oidp)
199{
200	struct sysctl_req req;
201	struct sysctl_oid *curr;
202	char *penv = NULL;
203	char path[64];
204	ssize_t rem = sizeof(path);
205	ssize_t len;
206	int val_int;
207	long val_long;
208	int64_t val_64;
209	quad_t val_quad;
210	int error;
211
212	path[--rem] = 0;
213
214	for (curr = oidp; curr != NULL; curr = SYSCTL_PARENT(curr)) {
215		len = strlen(curr->oid_name);
216		rem -= len;
217		if (curr != oidp)
218			rem -= 1;
219		if (rem < 0) {
220			printf("OID path exceeds %d bytes\n", (int)sizeof(path));
221			return;
222		}
223		memcpy(path + rem, curr->oid_name, len);
224		if (curr != oidp)
225			path[rem + len] = '.';
226	}
227
228	memset(&req, 0, sizeof(req));
229
230	req.td = curthread;
231	req.oldfunc = sysctl_old_kernel;
232	req.newfunc = sysctl_new_kernel;
233	req.lock = REQ_UNWIRED;
234
235	switch (oidp->oid_kind & CTLTYPE) {
236	case CTLTYPE_INT:
237		if (getenv_int(path + rem, &val_int) == 0)
238			return;
239		req.newlen = sizeof(val_int);
240		req.newptr = &val_int;
241		break;
242	case CTLTYPE_UINT:
243		if (getenv_uint(path + rem, (unsigned int *)&val_int) == 0)
244			return;
245		req.newlen = sizeof(val_int);
246		req.newptr = &val_int;
247		break;
248	case CTLTYPE_LONG:
249		if (getenv_long(path + rem, &val_long) == 0)
250			return;
251		req.newlen = sizeof(val_long);
252		req.newptr = &val_long;
253		break;
254	case CTLTYPE_ULONG:
255		if (getenv_ulong(path + rem, (unsigned long *)&val_long) == 0)
256			return;
257		req.newlen = sizeof(val_long);
258		req.newptr = &val_long;
259		break;
260	case CTLTYPE_S64:
261		if (getenv_quad(path + rem, &val_quad) == 0)
262			return;
263		val_64 = val_quad;
264		req.newlen = sizeof(val_64);
265		req.newptr = &val_64;
266		break;
267	case CTLTYPE_U64:
268		/* XXX there is no getenv_uquad() */
269		if (getenv_quad(path + rem, &val_quad) == 0)
270			return;
271		val_64 = val_quad;
272		req.newlen = sizeof(val_64);
273		req.newptr = &val_64;
274		break;
275	case CTLTYPE_STRING:
276		penv = kern_getenv(path + rem);
277		if (penv == NULL)
278			return;
279		req.newlen = strlen(penv);
280		req.newptr = penv;
281		break;
282	default:
283		return;
284	}
285	error = sysctl_root_handler_locked(oidp, oidp->oid_arg1,
286	    oidp->oid_arg2, &req);
287	if (error != 0)
288		printf("Setting sysctl %s failed: %d\n", path + rem, error);
289	if (penv != NULL)
290		freeenv(penv);
291}
292
293void
294sysctl_register_oid(struct sysctl_oid *oidp)
295{
296	struct sysctl_oid_list *parent = oidp->oid_parent;
297	struct sysctl_oid *p;
298	struct sysctl_oid *q;
299	int oid_number;
300	int timeout = 2;
301
302	/*
303	 * First check if another oid with the same name already
304	 * exists in the parent's list.
305	 */
306	SYSCTL_ASSERT_XLOCKED();
307	p = sysctl_find_oidname(oidp->oid_name, parent);
308	if (p != NULL) {
309		if ((p->oid_kind & CTLTYPE) == CTLTYPE_NODE) {
310			p->oid_refcnt++;
311			return;
312		} else {
313			printf("can't re-use a leaf (%s)!\n", p->oid_name);
314			return;
315		}
316	}
317	/* get current OID number */
318	oid_number = oidp->oid_number;
319
320#if (OID_AUTO >= 0)
321#error "OID_AUTO is expected to be a negative value"
322#endif
323	/*
324	 * Any negative OID number qualifies as OID_AUTO. Valid OID
325	 * numbers should always be positive.
326	 *
327	 * NOTE: DO NOT change the starting value here, change it in
328	 * <sys/sysctl.h>, and make sure it is at least 256 to
329	 * accomodate e.g. net.inet.raw as a static sysctl node.
330	 */
331	if (oid_number < 0) {
332		static int newoid;
333
334		/*
335		 * By decrementing the next OID number we spend less
336		 * time inserting the OIDs into a sorted list.
337		 */
338		if (--newoid < CTL_AUTO_START)
339			newoid = 0x7fffffff;
340
341		oid_number = newoid;
342	}
343
344	/*
345	 * Insert the OID into the parent's list sorted by OID number.
346	 */
347retry:
348	q = NULL;
349	SLIST_FOREACH(p, parent, oid_link) {
350		/* check if the current OID number is in use */
351		if (oid_number == p->oid_number) {
352			/* get the next valid OID number */
353			if (oid_number < CTL_AUTO_START ||
354			    oid_number == 0x7fffffff) {
355				/* wraparound - restart */
356				oid_number = CTL_AUTO_START;
357				/* don't loop forever */
358				if (!timeout--)
359					panic("sysctl: Out of OID numbers\n");
360				goto retry;
361			} else {
362				oid_number++;
363			}
364		} else if (oid_number < p->oid_number)
365			break;
366		q = p;
367	}
368	/* check for non-auto OID number collision */
369	if (oidp->oid_number >= 0 && oidp->oid_number < CTL_AUTO_START &&
370	    oid_number >= CTL_AUTO_START) {
371		printf("sysctl: OID number(%d) is already in use for '%s'\n",
372		    oidp->oid_number, oidp->oid_name);
373	}
374	/* update the OID number, if any */
375	oidp->oid_number = oid_number;
376	if (q != NULL)
377		SLIST_INSERT_AFTER(q, oidp, oid_link);
378	else
379		SLIST_INSERT_HEAD(parent, oidp, oid_link);
380
381	if ((oidp->oid_kind & CTLTYPE) != CTLTYPE_NODE &&
382#ifdef VIMAGE
383	    (oidp->oid_kind & CTLFLAG_VNET) == 0 &&
384#endif
385	    (oidp->oid_kind & CTLFLAG_TUN) != 0 &&
386	    (oidp->oid_kind & CTLFLAG_NOFETCH) == 0) {
387		/* only fetch value once */
388		oidp->oid_kind |= CTLFLAG_NOFETCH;
389		/* try to fetch value from kernel environment */
390		sysctl_load_tunable_by_oid_locked(oidp);
391	}
392}
393
394void
395sysctl_unregister_oid(struct sysctl_oid *oidp)
396{
397	struct sysctl_oid *p;
398	int error;
399
400	SYSCTL_ASSERT_XLOCKED();
401	error = ENOENT;
402	if (oidp->oid_number == OID_AUTO) {
403		error = EINVAL;
404	} else {
405		SLIST_FOREACH(p, oidp->oid_parent, oid_link) {
406			if (p == oidp) {
407				SLIST_REMOVE(oidp->oid_parent, oidp,
408				    sysctl_oid, oid_link);
409				error = 0;
410				break;
411			}
412		}
413	}
414
415	/*
416	 * This can happen when a module fails to register and is
417	 * being unloaded afterwards.  It should not be a panic()
418	 * for normal use.
419	 */
420	if (error)
421		printf("%s: failed to unregister sysctl\n", __func__);
422}
423
424/* Initialize a new context to keep track of dynamically added sysctls. */
425int
426sysctl_ctx_init(struct sysctl_ctx_list *c)
427{
428
429	if (c == NULL) {
430		return (EINVAL);
431	}
432
433	/*
434	 * No locking here, the caller is responsible for not adding
435	 * new nodes to a context until after this function has
436	 * returned.
437	 */
438	TAILQ_INIT(c);
439	return (0);
440}
441
442/* Free the context, and destroy all dynamic oids registered in this context */
443int
444sysctl_ctx_free(struct sysctl_ctx_list *clist)
445{
446	struct sysctl_ctx_entry *e, *e1;
447	int error;
448
449	error = 0;
450	/*
451	 * First perform a "dry run" to check if it's ok to remove oids.
452	 * XXX FIXME
453	 * XXX This algorithm is a hack. But I don't know any
454	 * XXX better solution for now...
455	 */
456	SYSCTL_XLOCK();
457	TAILQ_FOREACH(e, clist, link) {
458		error = sysctl_remove_oid_locked(e->entry, 0, 0);
459		if (error)
460			break;
461	}
462	/*
463	 * Restore deregistered entries, either from the end,
464	 * or from the place where error occured.
465	 * e contains the entry that was not unregistered
466	 */
467	if (error)
468		e1 = TAILQ_PREV(e, sysctl_ctx_list, link);
469	else
470		e1 = TAILQ_LAST(clist, sysctl_ctx_list);
471	while (e1 != NULL) {
472		sysctl_register_oid(e1->entry);
473		e1 = TAILQ_PREV(e1, sysctl_ctx_list, link);
474	}
475	if (error) {
476		SYSCTL_XUNLOCK();
477		return(EBUSY);
478	}
479	/* Now really delete the entries */
480	e = TAILQ_FIRST(clist);
481	while (e != NULL) {
482		e1 = TAILQ_NEXT(e, link);
483		error = sysctl_remove_oid_locked(e->entry, 1, 0);
484		if (error)
485			panic("sysctl_remove_oid: corrupt tree, entry: %s",
486			    e->entry->oid_name);
487		free(e, M_SYSCTLOID);
488		e = e1;
489	}
490	SYSCTL_XUNLOCK();
491	return (error);
492}
493
494/* Add an entry to the context */
495struct sysctl_ctx_entry *
496sysctl_ctx_entry_add(struct sysctl_ctx_list *clist, struct sysctl_oid *oidp)
497{
498	struct sysctl_ctx_entry *e;
499
500	SYSCTL_ASSERT_XLOCKED();
501	if (clist == NULL || oidp == NULL)
502		return(NULL);
503	e = malloc(sizeof(struct sysctl_ctx_entry), M_SYSCTLOID, M_WAITOK);
504	e->entry = oidp;
505	TAILQ_INSERT_HEAD(clist, e, link);
506	return (e);
507}
508
509/* Find an entry in the context */
510struct sysctl_ctx_entry *
511sysctl_ctx_entry_find(struct sysctl_ctx_list *clist, struct sysctl_oid *oidp)
512{
513	struct sysctl_ctx_entry *e;
514
515	SYSCTL_ASSERT_XLOCKED();
516	if (clist == NULL || oidp == NULL)
517		return(NULL);
518	TAILQ_FOREACH(e, clist, link) {
519		if(e->entry == oidp)
520			return(e);
521	}
522	return (e);
523}
524
525/*
526 * Delete an entry from the context.
527 * NOTE: this function doesn't free oidp! You have to remove it
528 * with sysctl_remove_oid().
529 */
530int
531sysctl_ctx_entry_del(struct sysctl_ctx_list *clist, struct sysctl_oid *oidp)
532{
533	struct sysctl_ctx_entry *e;
534
535	if (clist == NULL || oidp == NULL)
536		return (EINVAL);
537	SYSCTL_XLOCK();
538	e = sysctl_ctx_entry_find(clist, oidp);
539	if (e != NULL) {
540		TAILQ_REMOVE(clist, e, link);
541		SYSCTL_XUNLOCK();
542		free(e, M_SYSCTLOID);
543		return (0);
544	} else {
545		SYSCTL_XUNLOCK();
546		return (ENOENT);
547	}
548}
549
550/*
551 * Remove dynamically created sysctl trees.
552 * oidp - top of the tree to be removed
553 * del - if 0 - just deregister, otherwise free up entries as well
554 * recurse - if != 0 traverse the subtree to be deleted
555 */
556int
557sysctl_remove_oid(struct sysctl_oid *oidp, int del, int recurse)
558{
559	int error;
560
561	SYSCTL_XLOCK();
562	error = sysctl_remove_oid_locked(oidp, del, recurse);
563	SYSCTL_XUNLOCK();
564	return (error);
565}
566
567int
568sysctl_remove_name(struct sysctl_oid *parent, const char *name,
569    int del, int recurse)
570{
571	struct sysctl_oid *p, *tmp;
572	int error;
573
574	error = ENOENT;
575	SYSCTL_XLOCK();
576	SLIST_FOREACH_SAFE(p, SYSCTL_CHILDREN(parent), oid_link, tmp) {
577		if (strcmp(p->oid_name, name) == 0) {
578			error = sysctl_remove_oid_locked(p, del, recurse);
579			break;
580		}
581	}
582	SYSCTL_XUNLOCK();
583
584	return (error);
585}
586
587
588static int
589sysctl_remove_oid_locked(struct sysctl_oid *oidp, int del, int recurse)
590{
591	struct sysctl_oid *p, *tmp;
592	int error;
593
594	SYSCTL_ASSERT_XLOCKED();
595	if (oidp == NULL)
596		return(EINVAL);
597	if ((oidp->oid_kind & CTLFLAG_DYN) == 0) {
598		printf("can't remove non-dynamic nodes!\n");
599		return (EINVAL);
600	}
601	/*
602	 * WARNING: normal method to do this should be through
603	 * sysctl_ctx_free(). Use recursing as the last resort
604	 * method to purge your sysctl tree of leftovers...
605	 * However, if some other code still references these nodes,
606	 * it will panic.
607	 */
608	if ((oidp->oid_kind & CTLTYPE) == CTLTYPE_NODE) {
609		if (oidp->oid_refcnt == 1) {
610			SLIST_FOREACH_SAFE(p,
611			    SYSCTL_CHILDREN(oidp), oid_link, tmp) {
612				if (!recurse) {
613					printf("Warning: failed attempt to "
614					    "remove oid %s with child %s\n",
615					    oidp->oid_name, p->oid_name);
616					return (ENOTEMPTY);
617				}
618				error = sysctl_remove_oid_locked(p, del,
619				    recurse);
620				if (error)
621					return (error);
622			}
623		}
624	}
625	if (oidp->oid_refcnt > 1 ) {
626		oidp->oid_refcnt--;
627	} else {
628		if (oidp->oid_refcnt == 0) {
629			printf("Warning: bad oid_refcnt=%u (%s)!\n",
630				oidp->oid_refcnt, oidp->oid_name);
631			return (EINVAL);
632		}
633		sysctl_unregister_oid(oidp);
634		if (del) {
635			/*
636			 * Wait for all threads running the handler to drain.
637			 * This preserves the previous behavior when the
638			 * sysctl lock was held across a handler invocation,
639			 * and is necessary for module unload correctness.
640			 */
641			while (oidp->oid_running > 0) {
642				oidp->oid_kind |= CTLFLAG_DYING;
643				SYSCTL_SLEEP(&oidp->oid_running, "oidrm", 0);
644			}
645			if (oidp->oid_descr)
646				free(__DECONST(char *, oidp->oid_descr),
647				    M_SYSCTLOID);
648			free(__DECONST(char *, oidp->oid_name), M_SYSCTLOID);
649			free(oidp, M_SYSCTLOID);
650		}
651	}
652	return (0);
653}
654/*
655 * Create new sysctls at run time.
656 * clist may point to a valid context initialized with sysctl_ctx_init().
657 */
658struct sysctl_oid *
659sysctl_add_oid(struct sysctl_ctx_list *clist, struct sysctl_oid_list *parent,
660	int number, const char *name, int kind, void *arg1, intptr_t arg2,
661	int (*handler)(SYSCTL_HANDLER_ARGS), const char *fmt, const char *descr)
662{
663	struct sysctl_oid *oidp;
664
665	/* You have to hook up somewhere.. */
666	if (parent == NULL)
667		return(NULL);
668	/* Check if the node already exists, otherwise create it */
669	SYSCTL_XLOCK();
670	oidp = sysctl_find_oidname(name, parent);
671	if (oidp != NULL) {
672		if ((oidp->oid_kind & CTLTYPE) == CTLTYPE_NODE) {
673			oidp->oid_refcnt++;
674			/* Update the context */
675			if (clist != NULL)
676				sysctl_ctx_entry_add(clist, oidp);
677			SYSCTL_XUNLOCK();
678			return (oidp);
679		} else {
680			SYSCTL_XUNLOCK();
681			printf("can't re-use a leaf (%s)!\n", name);
682			return (NULL);
683		}
684	}
685	oidp = malloc(sizeof(struct sysctl_oid), M_SYSCTLOID, M_WAITOK|M_ZERO);
686	oidp->oid_parent = parent;
687	SLIST_INIT(&oidp->oid_children);
688	oidp->oid_number = number;
689	oidp->oid_refcnt = 1;
690	oidp->oid_name = strdup(name, M_SYSCTLOID);
691	oidp->oid_handler = handler;
692	oidp->oid_kind = CTLFLAG_DYN | kind;
693	oidp->oid_arg1 = arg1;
694	oidp->oid_arg2 = arg2;
695	oidp->oid_fmt = fmt;
696	if (descr != NULL)
697		oidp->oid_descr = strdup(descr, M_SYSCTLOID);
698	/* Update the context, if used */
699	if (clist != NULL)
700		sysctl_ctx_entry_add(clist, oidp);
701	/* Register this oid */
702	sysctl_register_oid(oidp);
703	SYSCTL_XUNLOCK();
704	return (oidp);
705}
706
707/*
708 * Rename an existing oid.
709 */
710void
711sysctl_rename_oid(struct sysctl_oid *oidp, const char *name)
712{
713	char *newname;
714	char *oldname;
715
716	newname = strdup(name, M_SYSCTLOID);
717	SYSCTL_XLOCK();
718	oldname = __DECONST(char *, oidp->oid_name);
719	oidp->oid_name = newname;
720	SYSCTL_XUNLOCK();
721	free(oldname, M_SYSCTLOID);
722}
723
724/*
725 * Reparent an existing oid.
726 */
727int
728sysctl_move_oid(struct sysctl_oid *oid, struct sysctl_oid_list *parent)
729{
730	struct sysctl_oid *oidp;
731
732	SYSCTL_XLOCK();
733	if (oid->oid_parent == parent) {
734		SYSCTL_XUNLOCK();
735		return (0);
736	}
737	oidp = sysctl_find_oidname(oid->oid_name, parent);
738	if (oidp != NULL) {
739		SYSCTL_XUNLOCK();
740		return (EEXIST);
741	}
742	sysctl_unregister_oid(oid);
743	oid->oid_parent = parent;
744	oid->oid_number = OID_AUTO;
745	sysctl_register_oid(oid);
746	SYSCTL_XUNLOCK();
747	return (0);
748}
749
750/*
751 * Register the kernel's oids on startup.
752 */
753SET_DECLARE(sysctl_set, struct sysctl_oid);
754
755static void
756sysctl_register_all(void *arg)
757{
758	struct sysctl_oid **oidp;
759
760	sx_init(&sysctlmemlock, "sysctl mem");
761	SYSCTL_INIT();
762	SYSCTL_XLOCK();
763	SET_FOREACH(oidp, sysctl_set)
764		sysctl_register_oid(*oidp);
765	SYSCTL_XUNLOCK();
766}
767SYSINIT(sysctl, SI_SUB_KMEM, SI_ORDER_FIRST, sysctl_register_all, 0);
768
769/*
770 * "Staff-functions"
771 *
772 * These functions implement a presently undocumented interface
773 * used by the sysctl program to walk the tree, and get the type
774 * so it can print the value.
775 * This interface is under work and consideration, and should probably
776 * be killed with a big axe by the first person who can find the time.
777 * (be aware though, that the proper interface isn't as obvious as it
778 * may seem, there are various conflicting requirements.
779 *
780 * {0,0}	printf the entire MIB-tree.
781 * {0,1,...}	return the name of the "..." OID.
782 * {0,2,...}	return the next OID.
783 * {0,3}	return the OID of the name in "new"
784 * {0,4,...}	return the kind & format info for the "..." OID.
785 * {0,5,...}	return the description the "..." OID.
786 */
787
788#ifdef SYSCTL_DEBUG
789static void
790sysctl_sysctl_debug_dump_node(struct sysctl_oid_list *l, int i)
791{
792	int k;
793	struct sysctl_oid *oidp;
794
795	SYSCTL_ASSERT_LOCKED();
796	SLIST_FOREACH(oidp, l, oid_link) {
797
798		for (k=0; k<i; k++)
799			printf(" ");
800
801		printf("%d %s ", oidp->oid_number, oidp->oid_name);
802
803		printf("%c%c",
804			oidp->oid_kind & CTLFLAG_RD ? 'R':' ',
805			oidp->oid_kind & CTLFLAG_WR ? 'W':' ');
806
807		if (oidp->oid_handler)
808			printf(" *Handler");
809
810		switch (oidp->oid_kind & CTLTYPE) {
811			case CTLTYPE_NODE:
812				printf(" Node\n");
813				if (!oidp->oid_handler) {
814					sysctl_sysctl_debug_dump_node(
815					    SYSCTL_CHILDREN(oidp), i + 2);
816				}
817				break;
818			case CTLTYPE_INT:    printf(" Int\n"); break;
819			case CTLTYPE_UINT:   printf(" u_int\n"); break;
820			case CTLTYPE_LONG:   printf(" Long\n"); break;
821			case CTLTYPE_ULONG:  printf(" u_long\n"); break;
822			case CTLTYPE_STRING: printf(" String\n"); break;
823			case CTLTYPE_U64:    printf(" uint64_t\n"); break;
824			case CTLTYPE_S64:    printf(" int64_t\n"); break;
825			case CTLTYPE_OPAQUE: printf(" Opaque/struct\n"); break;
826			default:	     printf("\n");
827		}
828
829	}
830}
831
832static int
833sysctl_sysctl_debug(SYSCTL_HANDLER_ARGS)
834{
835	int error;
836
837	error = priv_check(req->td, PRIV_SYSCTL_DEBUG);
838	if (error)
839		return (error);
840	SYSCTL_SLOCK();
841	sysctl_sysctl_debug_dump_node(&sysctl__children, 0);
842	SYSCTL_SUNLOCK();
843	return (ENOENT);
844}
845
846SYSCTL_PROC(_sysctl, 0, debug, CTLTYPE_STRING|CTLFLAG_RD|CTLFLAG_MPSAFE,
847	0, 0, sysctl_sysctl_debug, "-", "");
848#endif
849
850static int
851sysctl_sysctl_name(SYSCTL_HANDLER_ARGS)
852{
853	int *name = (int *) arg1;
854	u_int namelen = arg2;
855	int error = 0;
856	struct sysctl_oid *oid;
857	struct sysctl_oid_list *lsp = &sysctl__children, *lsp2;
858	char buf[10];
859
860	SYSCTL_SLOCK();
861	while (namelen) {
862		if (!lsp) {
863			snprintf(buf,sizeof(buf),"%d",*name);
864			if (req->oldidx)
865				error = SYSCTL_OUT(req, ".", 1);
866			if (!error)
867				error = SYSCTL_OUT(req, buf, strlen(buf));
868			if (error)
869				goto out;
870			namelen--;
871			name++;
872			continue;
873		}
874		lsp2 = 0;
875		SLIST_FOREACH(oid, lsp, oid_link) {
876			if (oid->oid_number != *name)
877				continue;
878
879			if (req->oldidx)
880				error = SYSCTL_OUT(req, ".", 1);
881			if (!error)
882				error = SYSCTL_OUT(req, oid->oid_name,
883					strlen(oid->oid_name));
884			if (error)
885				goto out;
886
887			namelen--;
888			name++;
889
890			if ((oid->oid_kind & CTLTYPE) != CTLTYPE_NODE)
891				break;
892
893			if (oid->oid_handler)
894				break;
895
896			lsp2 = SYSCTL_CHILDREN(oid);
897			break;
898		}
899		lsp = lsp2;
900	}
901	error = SYSCTL_OUT(req, "", 1);
902 out:
903	SYSCTL_SUNLOCK();
904	return (error);
905}
906
907/*
908 * XXXRW/JA: Shouldn't return name data for nodes that we don't permit in
909 * capability mode.
910 */
911static SYSCTL_NODE(_sysctl, 1, name, CTLFLAG_RD | CTLFLAG_MPSAFE | CTLFLAG_CAPRD,
912    sysctl_sysctl_name, "");
913
914static int
915sysctl_sysctl_next_ls(struct sysctl_oid_list *lsp, int *name, u_int namelen,
916	int *next, int *len, int level, struct sysctl_oid **oidpp)
917{
918	struct sysctl_oid *oidp;
919
920	SYSCTL_ASSERT_LOCKED();
921	*len = level;
922	SLIST_FOREACH(oidp, lsp, oid_link) {
923		*next = oidp->oid_number;
924		*oidpp = oidp;
925
926		if (oidp->oid_kind & CTLFLAG_SKIP)
927			continue;
928
929		if (!namelen) {
930			if ((oidp->oid_kind & CTLTYPE) != CTLTYPE_NODE)
931				return (0);
932			if (oidp->oid_handler)
933				/* We really should call the handler here...*/
934				return (0);
935			lsp = SYSCTL_CHILDREN(oidp);
936			if (!sysctl_sysctl_next_ls(lsp, 0, 0, next+1,
937				len, level+1, oidpp))
938				return (0);
939			goto emptynode;
940		}
941
942		if (oidp->oid_number < *name)
943			continue;
944
945		if (oidp->oid_number > *name) {
946			if ((oidp->oid_kind & CTLTYPE) != CTLTYPE_NODE)
947				return (0);
948			if (oidp->oid_handler)
949				return (0);
950			lsp = SYSCTL_CHILDREN(oidp);
951			if (!sysctl_sysctl_next_ls(lsp, name+1, namelen-1,
952				next+1, len, level+1, oidpp))
953				return (0);
954			goto next;
955		}
956		if ((oidp->oid_kind & CTLTYPE) != CTLTYPE_NODE)
957			continue;
958
959		if (oidp->oid_handler)
960			continue;
961
962		lsp = SYSCTL_CHILDREN(oidp);
963		if (!sysctl_sysctl_next_ls(lsp, name+1, namelen-1, next+1,
964			len, level+1, oidpp))
965			return (0);
966	next:
967		namelen = 1;
968	emptynode:
969		*len = level;
970	}
971	return (1);
972}
973
974static int
975sysctl_sysctl_next(SYSCTL_HANDLER_ARGS)
976{
977	int *name = (int *) arg1;
978	u_int namelen = arg2;
979	int i, j, error;
980	struct sysctl_oid *oid;
981	struct sysctl_oid_list *lsp = &sysctl__children;
982	int newoid[CTL_MAXNAME];
983
984	SYSCTL_SLOCK();
985	i = sysctl_sysctl_next_ls(lsp, name, namelen, newoid, &j, 1, &oid);
986	SYSCTL_SUNLOCK();
987	if (i)
988		return (ENOENT);
989	error = SYSCTL_OUT(req, newoid, j * sizeof (int));
990	return (error);
991}
992
993/*
994 * XXXRW/JA: Shouldn't return next data for nodes that we don't permit in
995 * capability mode.
996 */
997static SYSCTL_NODE(_sysctl, 2, next, CTLFLAG_RD | CTLFLAG_MPSAFE | CTLFLAG_CAPRD,
998    sysctl_sysctl_next, "");
999
1000static int
1001name2oid(char *name, int *oid, int *len, struct sysctl_oid **oidpp)
1002{
1003	struct sysctl_oid *oidp;
1004	struct sysctl_oid_list *lsp = &sysctl__children;
1005	char *p;
1006
1007	SYSCTL_ASSERT_LOCKED();
1008
1009	for (*len = 0; *len < CTL_MAXNAME;) {
1010		p = strsep(&name, ".");
1011
1012		oidp = SLIST_FIRST(lsp);
1013		for (;; oidp = SLIST_NEXT(oidp, oid_link)) {
1014			if (oidp == NULL)
1015				return (ENOENT);
1016			if (strcmp(p, oidp->oid_name) == 0)
1017				break;
1018		}
1019		*oid++ = oidp->oid_number;
1020		(*len)++;
1021
1022		if (name == NULL || *name == '\0') {
1023			if (oidpp)
1024				*oidpp = oidp;
1025			return (0);
1026		}
1027
1028		if ((oidp->oid_kind & CTLTYPE) != CTLTYPE_NODE)
1029			break;
1030
1031		if (oidp->oid_handler)
1032			break;
1033
1034		lsp = SYSCTL_CHILDREN(oidp);
1035	}
1036	return (ENOENT);
1037}
1038
1039static int
1040sysctl_sysctl_name2oid(SYSCTL_HANDLER_ARGS)
1041{
1042	char *p;
1043	int error, oid[CTL_MAXNAME], len = 0;
1044	struct sysctl_oid *op = 0;
1045
1046	if (!req->newlen)
1047		return (ENOENT);
1048	if (req->newlen >= MAXPATHLEN)	/* XXX arbitrary, undocumented */
1049		return (ENAMETOOLONG);
1050
1051	p = malloc(req->newlen+1, M_SYSCTL, M_WAITOK);
1052
1053	error = SYSCTL_IN(req, p, req->newlen);
1054	if (error) {
1055		free(p, M_SYSCTL);
1056		return (error);
1057	}
1058
1059	p [req->newlen] = '\0';
1060
1061	SYSCTL_SLOCK();
1062	error = name2oid(p, oid, &len, &op);
1063	SYSCTL_SUNLOCK();
1064
1065	free(p, M_SYSCTL);
1066
1067	if (error)
1068		return (error);
1069
1070	error = SYSCTL_OUT(req, oid, len * sizeof *oid);
1071	return (error);
1072}
1073
1074/*
1075 * XXXRW/JA: Shouldn't return name2oid data for nodes that we don't permit in
1076 * capability mode.
1077 */
1078SYSCTL_PROC(_sysctl, 3, name2oid,
1079    CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_ANYBODY | CTLFLAG_MPSAFE
1080    | CTLFLAG_CAPRW, 0, 0, sysctl_sysctl_name2oid, "I", "");
1081
1082static int
1083sysctl_sysctl_oidfmt(SYSCTL_HANDLER_ARGS)
1084{
1085	struct sysctl_oid *oid;
1086	int error;
1087
1088	SYSCTL_SLOCK();
1089	error = sysctl_find_oid(arg1, arg2, &oid, NULL, req);
1090	if (error)
1091		goto out;
1092
1093	if (oid->oid_fmt == NULL) {
1094		error = ENOENT;
1095		goto out;
1096	}
1097	error = SYSCTL_OUT(req, &oid->oid_kind, sizeof(oid->oid_kind));
1098	if (error)
1099		goto out;
1100	error = SYSCTL_OUT(req, oid->oid_fmt, strlen(oid->oid_fmt) + 1);
1101 out:
1102	SYSCTL_SUNLOCK();
1103	return (error);
1104}
1105
1106
1107static SYSCTL_NODE(_sysctl, 4, oidfmt, CTLFLAG_RD|CTLFLAG_MPSAFE|CTLFLAG_CAPRD,
1108    sysctl_sysctl_oidfmt, "");
1109
1110static int
1111sysctl_sysctl_oiddescr(SYSCTL_HANDLER_ARGS)
1112{
1113	struct sysctl_oid *oid;
1114	int error;
1115
1116	SYSCTL_SLOCK();
1117	error = sysctl_find_oid(arg1, arg2, &oid, NULL, req);
1118	if (error)
1119		goto out;
1120
1121	if (oid->oid_descr == NULL) {
1122		error = ENOENT;
1123		goto out;
1124	}
1125	error = SYSCTL_OUT(req, oid->oid_descr, strlen(oid->oid_descr) + 1);
1126 out:
1127	SYSCTL_SUNLOCK();
1128	return (error);
1129}
1130
1131static SYSCTL_NODE(_sysctl, 5, oiddescr, CTLFLAG_RD|CTLFLAG_MPSAFE|CTLFLAG_CAPRD,
1132    sysctl_sysctl_oiddescr, "");
1133
1134/*
1135 * Default "handler" functions.
1136 */
1137
1138/*
1139 * Handle an int, signed or unsigned.
1140 * Two cases:
1141 *     a variable:  point arg1 at it.
1142 *     a constant:  pass it in arg2.
1143 */
1144
1145int
1146sysctl_handle_int(SYSCTL_HANDLER_ARGS)
1147{
1148	int tmpout, error = 0;
1149
1150	/*
1151	 * Attempt to get a coherent snapshot by making a copy of the data.
1152	 */
1153	if (arg1)
1154		tmpout = *(int *)arg1;
1155	else
1156		tmpout = arg2;
1157	error = SYSCTL_OUT(req, &tmpout, sizeof(int));
1158
1159	if (error || !req->newptr)
1160		return (error);
1161
1162	if (!arg1)
1163		error = EPERM;
1164	else
1165		error = SYSCTL_IN(req, arg1, sizeof(int));
1166	return (error);
1167}
1168
1169/*
1170 * Based on on sysctl_handle_int() convert milliseconds into ticks.
1171 * Note: this is used by TCP.
1172 */
1173
1174int
1175sysctl_msec_to_ticks(SYSCTL_HANDLER_ARGS)
1176{
1177	int error, s, tt;
1178
1179	tt = *(int *)arg1;
1180	s = (int)((int64_t)tt * 1000 / hz);
1181
1182	error = sysctl_handle_int(oidp, &s, 0, req);
1183	if (error || !req->newptr)
1184		return (error);
1185
1186	tt = (int)((int64_t)s * hz / 1000);
1187	if (tt < 1)
1188		return (EINVAL);
1189
1190	*(int *)arg1 = tt;
1191	return (0);
1192}
1193
1194
1195/*
1196 * Handle a long, signed or unsigned.
1197 * Two cases:
1198 *     a variable:  point arg1 at it.
1199 *     a constant:  pass it in arg2.
1200 */
1201
1202int
1203sysctl_handle_long(SYSCTL_HANDLER_ARGS)
1204{
1205	int error = 0;
1206	long tmplong;
1207#ifdef SCTL_MASK32
1208	int tmpint;
1209#endif
1210
1211	/*
1212	 * Attempt to get a coherent snapshot by making a copy of the data.
1213	 */
1214	if (arg1)
1215		tmplong = *(long *)arg1;
1216	else
1217		tmplong = arg2;
1218#ifdef SCTL_MASK32
1219	if (req->flags & SCTL_MASK32) {
1220		tmpint = tmplong;
1221		error = SYSCTL_OUT(req, &tmpint, sizeof(int));
1222	} else
1223#endif
1224		error = SYSCTL_OUT(req, &tmplong, sizeof(long));
1225
1226	if (error || !req->newptr)
1227		return (error);
1228
1229	if (!arg1)
1230		error = EPERM;
1231#ifdef SCTL_MASK32
1232	else if (req->flags & SCTL_MASK32) {
1233		error = SYSCTL_IN(req, &tmpint, sizeof(int));
1234		*(long *)arg1 = (long)tmpint;
1235	}
1236#endif
1237	else
1238		error = SYSCTL_IN(req, arg1, sizeof(long));
1239	return (error);
1240}
1241
1242/*
1243 * Handle a 64 bit int, signed or unsigned.
1244 * Two cases:
1245 *     a variable:  point arg1 at it.
1246 *     a constant:  pass it in arg2.
1247 */
1248int
1249sysctl_handle_64(SYSCTL_HANDLER_ARGS)
1250{
1251	int error = 0;
1252	uint64_t tmpout;
1253
1254	/*
1255	 * Attempt to get a coherent snapshot by making a copy of the data.
1256	 */
1257	if (arg1)
1258		tmpout = *(uint64_t *)arg1;
1259	else
1260		tmpout = arg2;
1261	error = SYSCTL_OUT(req, &tmpout, sizeof(uint64_t));
1262
1263	if (error || !req->newptr)
1264		return (error);
1265
1266	if (!arg1)
1267		error = EPERM;
1268	else
1269		error = SYSCTL_IN(req, arg1, sizeof(uint64_t));
1270	return (error);
1271}
1272
1273/*
1274 * Handle our generic '\0' terminated 'C' string.
1275 * Two cases:
1276 * 	a variable string:  point arg1 at it, arg2 is max length.
1277 * 	a constant string:  point arg1 at it, arg2 is zero.
1278 */
1279
1280int
1281sysctl_handle_string(SYSCTL_HANDLER_ARGS)
1282{
1283	size_t outlen;
1284	int error = 0, ro_string = 0;
1285
1286	/*
1287	 * A zero-length buffer indicates a fixed size read-only
1288	 * string:
1289	 */
1290	if (arg2 == 0) {
1291		arg2 = strlen((char *)arg1) + 1;
1292		ro_string = 1;
1293	}
1294
1295	if (req->oldptr != NULL) {
1296		char *tmparg;
1297
1298		if (ro_string) {
1299			tmparg = arg1;
1300		} else {
1301			/* try to make a coherent snapshot of the string */
1302			tmparg = malloc(arg2, M_SYSCTLTMP, M_WAITOK);
1303			memcpy(tmparg, arg1, arg2);
1304		}
1305
1306		outlen = strnlen(tmparg, arg2 - 1) + 1;
1307		error = SYSCTL_OUT(req, tmparg, outlen);
1308
1309		if (!ro_string)
1310			free(tmparg, M_SYSCTLTMP);
1311	} else {
1312		outlen = strnlen((char *)arg1, arg2 - 1) + 1;
1313		error = SYSCTL_OUT(req, NULL, outlen);
1314	}
1315	if (error || !req->newptr)
1316		return (error);
1317
1318	if ((req->newlen - req->newidx) >= arg2) {
1319		error = EINVAL;
1320	} else {
1321		arg2 = (req->newlen - req->newidx);
1322		error = SYSCTL_IN(req, arg1, arg2);
1323		((char *)arg1)[arg2] = '\0';
1324	}
1325	return (error);
1326}
1327
1328/*
1329 * Handle any kind of opaque data.
1330 * arg1 points to it, arg2 is the size.
1331 */
1332
1333int
1334sysctl_handle_opaque(SYSCTL_HANDLER_ARGS)
1335{
1336	int error, tries;
1337	u_int generation;
1338	struct sysctl_req req2;
1339
1340	/*
1341	 * Attempt to get a coherent snapshot, by using the thread
1342	 * pre-emption counter updated from within mi_switch() to
1343	 * determine if we were pre-empted during a bcopy() or
1344	 * copyout(). Make 3 attempts at doing this before giving up.
1345	 * If we encounter an error, stop immediately.
1346	 */
1347	tries = 0;
1348	req2 = *req;
1349retry:
1350	generation = curthread->td_generation;
1351	error = SYSCTL_OUT(req, arg1, arg2);
1352	if (error)
1353		return (error);
1354	tries++;
1355	if (generation != curthread->td_generation && tries < 3) {
1356		*req = req2;
1357		goto retry;
1358	}
1359
1360	error = SYSCTL_IN(req, arg1, arg2);
1361
1362	return (error);
1363}
1364
1365/*
1366 * Transfer functions to/from kernel space.
1367 * XXX: rather untested at this point
1368 */
1369static int
1370sysctl_old_kernel(struct sysctl_req *req, const void *p, size_t l)
1371{
1372	size_t i = 0;
1373
1374	if (req->oldptr) {
1375		i = l;
1376		if (req->oldlen <= req->oldidx)
1377			i = 0;
1378		else
1379			if (i > req->oldlen - req->oldidx)
1380				i = req->oldlen - req->oldidx;
1381		if (i > 0)
1382			bcopy(p, (char *)req->oldptr + req->oldidx, i);
1383	}
1384	req->oldidx += l;
1385	if (req->oldptr && i != l)
1386		return (ENOMEM);
1387	return (0);
1388}
1389
1390static int
1391sysctl_new_kernel(struct sysctl_req *req, void *p, size_t l)
1392{
1393	if (!req->newptr)
1394		return (0);
1395	if (req->newlen - req->newidx < l)
1396		return (EINVAL);
1397	bcopy((char *)req->newptr + req->newidx, p, l);
1398	req->newidx += l;
1399	return (0);
1400}
1401
1402int
1403kernel_sysctl(struct thread *td, int *name, u_int namelen, void *old,
1404    size_t *oldlenp, void *new, size_t newlen, size_t *retval, int flags)
1405{
1406	int error = 0;
1407	struct sysctl_req req;
1408
1409	bzero(&req, sizeof req);
1410
1411	req.td = td;
1412	req.flags = flags;
1413
1414	if (oldlenp) {
1415		req.oldlen = *oldlenp;
1416	}
1417	req.validlen = req.oldlen;
1418
1419	if (old) {
1420		req.oldptr= old;
1421	}
1422
1423	if (new != NULL) {
1424		req.newlen = newlen;
1425		req.newptr = new;
1426	}
1427
1428	req.oldfunc = sysctl_old_kernel;
1429	req.newfunc = sysctl_new_kernel;
1430	req.lock = REQ_UNWIRED;
1431
1432	SYSCTL_SLOCK();
1433	error = sysctl_root(0, name, namelen, &req);
1434	SYSCTL_SUNLOCK();
1435
1436	if (req.lock == REQ_WIRED && req.validlen > 0)
1437		vsunlock(req.oldptr, req.validlen);
1438
1439	if (error && error != ENOMEM)
1440		return (error);
1441
1442	if (retval) {
1443		if (req.oldptr && req.oldidx > req.validlen)
1444			*retval = req.validlen;
1445		else
1446			*retval = req.oldidx;
1447	}
1448	return (error);
1449}
1450
1451int
1452kernel_sysctlbyname(struct thread *td, char *name, void *old, size_t *oldlenp,
1453    void *new, size_t newlen, size_t *retval, int flags)
1454{
1455        int oid[CTL_MAXNAME];
1456        size_t oidlen, plen;
1457	int error;
1458
1459	oid[0] = 0;		/* sysctl internal magic */
1460	oid[1] = 3;		/* name2oid */
1461	oidlen = sizeof(oid);
1462
1463	error = kernel_sysctl(td, oid, 2, oid, &oidlen,
1464	    (void *)name, strlen(name), &plen, flags);
1465	if (error)
1466		return (error);
1467
1468	error = kernel_sysctl(td, oid, plen / sizeof(int), old, oldlenp,
1469	    new, newlen, retval, flags);
1470	return (error);
1471}
1472
1473/*
1474 * Transfer function to/from user space.
1475 */
1476static int
1477sysctl_old_user(struct sysctl_req *req, const void *p, size_t l)
1478{
1479	size_t i, len, origidx;
1480	int error;
1481
1482	origidx = req->oldidx;
1483	req->oldidx += l;
1484	if (req->oldptr == NULL)
1485		return (0);
1486	/*
1487	 * If we have not wired the user supplied buffer and we are currently
1488	 * holding locks, drop a witness warning, as it's possible that
1489	 * write operations to the user page can sleep.
1490	 */
1491	if (req->lock != REQ_WIRED)
1492		WITNESS_WARN(WARN_GIANTOK | WARN_SLEEPOK, NULL,
1493		    "sysctl_old_user()");
1494	i = l;
1495	len = req->validlen;
1496	if (len <= origidx)
1497		i = 0;
1498	else {
1499		if (i > len - origidx)
1500			i = len - origidx;
1501		if (req->lock == REQ_WIRED) {
1502			error = copyout_nofault(p, (char *)req->oldptr +
1503			    origidx, i);
1504		} else
1505			error = copyout(p, (char *)req->oldptr + origidx, i);
1506		if (error != 0)
1507			return (error);
1508	}
1509	if (i < l)
1510		return (ENOMEM);
1511	return (0);
1512}
1513
1514static int
1515sysctl_new_user(struct sysctl_req *req, void *p, size_t l)
1516{
1517	int error;
1518
1519	if (!req->newptr)
1520		return (0);
1521	if (req->newlen - req->newidx < l)
1522		return (EINVAL);
1523	WITNESS_WARN(WARN_GIANTOK | WARN_SLEEPOK, NULL,
1524	    "sysctl_new_user()");
1525	error = copyin((char *)req->newptr + req->newidx, p, l);
1526	req->newidx += l;
1527	return (error);
1528}
1529
1530/*
1531 * Wire the user space destination buffer.  If set to a value greater than
1532 * zero, the len parameter limits the maximum amount of wired memory.
1533 */
1534int
1535sysctl_wire_old_buffer(struct sysctl_req *req, size_t len)
1536{
1537	int ret;
1538	size_t wiredlen;
1539
1540	wiredlen = (len > 0 && len < req->oldlen) ? len : req->oldlen;
1541	ret = 0;
1542	if (req->lock != REQ_WIRED && req->oldptr &&
1543	    req->oldfunc == sysctl_old_user) {
1544		if (wiredlen != 0) {
1545			ret = vslock(req->oldptr, wiredlen);
1546			if (ret != 0) {
1547				if (ret != ENOMEM)
1548					return (ret);
1549				wiredlen = 0;
1550			}
1551		}
1552		req->lock = REQ_WIRED;
1553		req->validlen = wiredlen;
1554	}
1555	return (0);
1556}
1557
1558int
1559sysctl_find_oid(int *name, u_int namelen, struct sysctl_oid **noid,
1560    int *nindx, struct sysctl_req *req)
1561{
1562	struct sysctl_oid_list *lsp;
1563	struct sysctl_oid *oid;
1564	int indx;
1565
1566	SYSCTL_ASSERT_LOCKED();
1567	lsp = &sysctl__children;
1568	indx = 0;
1569	while (indx < CTL_MAXNAME) {
1570		SLIST_FOREACH(oid, lsp, oid_link) {
1571			if (oid->oid_number == name[indx])
1572				break;
1573		}
1574		if (oid == NULL)
1575			return (ENOENT);
1576
1577		indx++;
1578		if ((oid->oid_kind & CTLTYPE) == CTLTYPE_NODE) {
1579			if (oid->oid_handler != NULL || indx == namelen) {
1580				*noid = oid;
1581				if (nindx != NULL)
1582					*nindx = indx;
1583				KASSERT((oid->oid_kind & CTLFLAG_DYING) == 0,
1584				    ("%s found DYING node %p", __func__, oid));
1585				return (0);
1586			}
1587			lsp = SYSCTL_CHILDREN(oid);
1588		} else if (indx == namelen) {
1589			*noid = oid;
1590			if (nindx != NULL)
1591				*nindx = indx;
1592			KASSERT((oid->oid_kind & CTLFLAG_DYING) == 0,
1593			    ("%s found DYING node %p", __func__, oid));
1594			return (0);
1595		} else {
1596			return (ENOTDIR);
1597		}
1598	}
1599	return (ENOENT);
1600}
1601
1602/*
1603 * Traverse our tree, and find the right node, execute whatever it points
1604 * to, and return the resulting error code.
1605 */
1606
1607static int
1608sysctl_root(SYSCTL_HANDLER_ARGS)
1609{
1610	struct sysctl_oid *oid;
1611	int error, indx, lvl;
1612
1613	SYSCTL_ASSERT_SLOCKED();
1614
1615	error = sysctl_find_oid(arg1, arg2, &oid, &indx, req);
1616	if (error)
1617		return (error);
1618
1619	if ((oid->oid_kind & CTLTYPE) == CTLTYPE_NODE) {
1620		/*
1621		 * You can't call a sysctl when it's a node, but has
1622		 * no handler.  Inform the user that it's a node.
1623		 * The indx may or may not be the same as namelen.
1624		 */
1625		if (oid->oid_handler == NULL)
1626			return (EISDIR);
1627	}
1628
1629	/* Is this sysctl writable? */
1630	if (req->newptr && !(oid->oid_kind & CTLFLAG_WR))
1631		return (EPERM);
1632
1633	KASSERT(req->td != NULL, ("sysctl_root(): req->td == NULL"));
1634
1635#ifdef CAPABILITY_MODE
1636	/*
1637	 * If the process is in capability mode, then don't permit reading or
1638	 * writing unless specifically granted for the node.
1639	 */
1640	if (IN_CAPABILITY_MODE(req->td)) {
1641		if (req->oldptr && !(oid->oid_kind & CTLFLAG_CAPRD))
1642			return (EPERM);
1643		if (req->newptr && !(oid->oid_kind & CTLFLAG_CAPWR))
1644			return (EPERM);
1645	}
1646#endif
1647
1648	/* Is this sysctl sensitive to securelevels? */
1649	if (req->newptr && (oid->oid_kind & CTLFLAG_SECURE)) {
1650		lvl = (oid->oid_kind & CTLMASK_SECURE) >> CTLSHIFT_SECURE;
1651		error = securelevel_gt(req->td->td_ucred, lvl);
1652		if (error)
1653			return (error);
1654	}
1655
1656	/* Is this sysctl writable by only privileged users? */
1657	if (req->newptr && !(oid->oid_kind & CTLFLAG_ANYBODY)) {
1658		int priv;
1659
1660		if (oid->oid_kind & CTLFLAG_PRISON)
1661			priv = PRIV_SYSCTL_WRITEJAIL;
1662#ifdef VIMAGE
1663		else if ((oid->oid_kind & CTLFLAG_VNET) &&
1664		     prison_owns_vnet(req->td->td_ucred))
1665			priv = PRIV_SYSCTL_WRITEJAIL;
1666#endif
1667		else
1668			priv = PRIV_SYSCTL_WRITE;
1669		error = priv_check(req->td, priv);
1670		if (error)
1671			return (error);
1672	}
1673
1674	if (!oid->oid_handler)
1675		return (EINVAL);
1676
1677	if ((oid->oid_kind & CTLTYPE) == CTLTYPE_NODE) {
1678		arg1 = (int *)arg1 + indx;
1679		arg2 -= indx;
1680	} else {
1681		arg1 = oid->oid_arg1;
1682		arg2 = oid->oid_arg2;
1683	}
1684#ifdef MAC
1685	error = mac_system_check_sysctl(req->td->td_ucred, oid, arg1, arg2,
1686	    req);
1687	if (error != 0)
1688		return (error);
1689#endif
1690#ifdef VIMAGE
1691	if ((oid->oid_kind & CTLFLAG_VNET) && arg1 != NULL)
1692		arg1 = (void *)(curvnet->vnet_data_base + (uintptr_t)arg1);
1693#endif
1694	error = sysctl_root_handler_locked(oid, arg1, arg2, req);
1695
1696	KFAIL_POINT_ERROR(_debug_fail_point, sysctl_running, error);
1697
1698	return (error);
1699}
1700
1701#ifndef _SYS_SYSPROTO_H_
1702struct sysctl_args {
1703	int	*name;
1704	u_int	namelen;
1705	void	*old;
1706	size_t	*oldlenp;
1707	void	*new;
1708	size_t	newlen;
1709};
1710#endif
1711int
1712sys___sysctl(struct thread *td, struct sysctl_args *uap)
1713{
1714	int error, i, name[CTL_MAXNAME];
1715	size_t j;
1716
1717	if (uap->namelen > CTL_MAXNAME || uap->namelen < 2)
1718		return (EINVAL);
1719
1720 	error = copyin(uap->name, &name, uap->namelen * sizeof(int));
1721 	if (error)
1722		return (error);
1723
1724	error = userland_sysctl(td, name, uap->namelen,
1725		uap->old, uap->oldlenp, 0,
1726		uap->new, uap->newlen, &j, 0);
1727	if (error && error != ENOMEM)
1728		return (error);
1729	if (uap->oldlenp) {
1730		i = copyout(&j, uap->oldlenp, sizeof(j));
1731		if (i)
1732			return (i);
1733	}
1734	return (error);
1735}
1736
1737/*
1738 * This is used from various compatibility syscalls too.  That's why name
1739 * must be in kernel space.
1740 */
1741int
1742userland_sysctl(struct thread *td, int *name, u_int namelen, void *old,
1743    size_t *oldlenp, int inkernel, void *new, size_t newlen, size_t *retval,
1744    int flags)
1745{
1746	int error = 0, memlocked;
1747	struct sysctl_req req;
1748
1749	bzero(&req, sizeof req);
1750
1751	req.td = td;
1752	req.flags = flags;
1753
1754	if (oldlenp) {
1755		if (inkernel) {
1756			req.oldlen = *oldlenp;
1757		} else {
1758			error = copyin(oldlenp, &req.oldlen, sizeof(*oldlenp));
1759			if (error)
1760				return (error);
1761		}
1762	}
1763	req.validlen = req.oldlen;
1764
1765	if (old) {
1766		if (!useracc(old, req.oldlen, VM_PROT_WRITE))
1767			return (EFAULT);
1768		req.oldptr= old;
1769	}
1770
1771	if (new != NULL) {
1772		if (!useracc(new, newlen, VM_PROT_READ))
1773			return (EFAULT);
1774		req.newlen = newlen;
1775		req.newptr = new;
1776	}
1777
1778	req.oldfunc = sysctl_old_user;
1779	req.newfunc = sysctl_new_user;
1780	req.lock = REQ_UNWIRED;
1781
1782#ifdef KTRACE
1783	if (KTRPOINT(curthread, KTR_SYSCTL))
1784		ktrsysctl(name, namelen);
1785#endif
1786
1787	if (req.oldptr && req.oldlen > PAGE_SIZE) {
1788		memlocked = 1;
1789		sx_xlock(&sysctlmemlock);
1790	} else
1791		memlocked = 0;
1792	CURVNET_SET(TD_TO_VNET(td));
1793
1794	for (;;) {
1795		req.oldidx = 0;
1796		req.newidx = 0;
1797		SYSCTL_SLOCK();
1798		error = sysctl_root(0, name, namelen, &req);
1799		SYSCTL_SUNLOCK();
1800		if (error != EAGAIN)
1801			break;
1802		kern_yield(PRI_USER);
1803	}
1804
1805	CURVNET_RESTORE();
1806
1807	if (req.lock == REQ_WIRED && req.validlen > 0)
1808		vsunlock(req.oldptr, req.validlen);
1809	if (memlocked)
1810		sx_xunlock(&sysctlmemlock);
1811
1812	if (error && error != ENOMEM)
1813		return (error);
1814
1815	if (retval) {
1816		if (req.oldptr && req.oldidx > req.validlen)
1817			*retval = req.validlen;
1818		else
1819			*retval = req.oldidx;
1820	}
1821	return (error);
1822}
1823
1824/*
1825 * Drain into a sysctl struct.  The user buffer should be wired if a page
1826 * fault would cause issue.
1827 */
1828static int
1829sbuf_sysctl_drain(void *arg, const char *data, int len)
1830{
1831	struct sysctl_req *req = arg;
1832	int error;
1833
1834	error = SYSCTL_OUT(req, data, len);
1835	KASSERT(error >= 0, ("Got unexpected negative value %d", error));
1836	return (error == 0 ? len : -error);
1837}
1838
1839struct sbuf *
1840sbuf_new_for_sysctl(struct sbuf *s, char *buf, int length,
1841    struct sysctl_req *req)
1842{
1843
1844	/* Supply a default buffer size if none given. */
1845	if (buf == NULL && length == 0)
1846		length = 64;
1847	s = sbuf_new(s, buf, length, SBUF_FIXEDLEN | SBUF_INCLUDENUL);
1848	sbuf_set_drain(s, sbuf_sysctl_drain, req);
1849	return (s);
1850}
1851