kern_sysctl.c revision 327404
1/*- 2 * Copyright (c) 1982, 1986, 1989, 1993 3 * The Regents of the University of California. All rights reserved. 4 * 5 * This code is derived from software contributed to Berkeley by 6 * Mike Karels at Berkeley Software Design, Inc. 7 * 8 * Quite extensively rewritten by Poul-Henning Kamp of the FreeBSD 9 * project, to make these variables more userfriendly. 10 * 11 * Redistribution and use in source and binary forms, with or without 12 * modification, are permitted provided that the following conditions 13 * are met: 14 * 1. Redistributions of source code must retain the above copyright 15 * notice, this list of conditions and the following disclaimer. 16 * 2. Redistributions in binary form must reproduce the above copyright 17 * notice, this list of conditions and the following disclaimer in the 18 * documentation and/or other materials provided with the distribution. 19 * 4. Neither the name of the University nor the names of its contributors 20 * may be used to endorse or promote products derived from this software 21 * without specific prior written permission. 22 * 23 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND 24 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 25 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 26 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE 27 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 28 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS 29 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 30 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT 31 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY 32 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 33 * SUCH DAMAGE. 
34 * 35 * @(#)kern_sysctl.c 8.4 (Berkeley) 4/14/94 36 */ 37 38#include <sys/cdefs.h> 39__FBSDID("$FreeBSD: stable/11/sys/kern/kern_sysctl.c 327404 2017-12-31 03:06:29Z mjg $"); 40 41#include "opt_capsicum.h" 42#include "opt_compat.h" 43#include "opt_ktrace.h" 44 45#include <sys/param.h> 46#include <sys/fail.h> 47#include <sys/systm.h> 48#include <sys/capsicum.h> 49#include <sys/kernel.h> 50#include <sys/sysctl.h> 51#include <sys/malloc.h> 52#include <sys/priv.h> 53#include <sys/proc.h> 54#include <sys/jail.h> 55#include <sys/lock.h> 56#include <sys/mutex.h> 57#include <sys/rmlock.h> 58#include <sys/sbuf.h> 59#include <sys/sx.h> 60#include <sys/sysproto.h> 61#include <sys/uio.h> 62#ifdef KTRACE 63#include <sys/ktrace.h> 64#endif 65 66#include <net/vnet.h> 67 68#include <security/mac/mac_framework.h> 69 70#include <vm/vm.h> 71#include <vm/vm_extern.h> 72 73static MALLOC_DEFINE(M_SYSCTL, "sysctl", "sysctl internal magic"); 74static MALLOC_DEFINE(M_SYSCTLOID, "sysctloid", "sysctl dynamic oids"); 75static MALLOC_DEFINE(M_SYSCTLTMP, "sysctltmp", "sysctl temp output buffer"); 76 77/* 78 * The sysctllock protects the MIB tree. It also protects sysctl 79 * contexts used with dynamic sysctls. The sysctl_register_oid() and 80 * sysctl_unregister_oid() routines require the sysctllock to already 81 * be held, so the sysctl_wlock() and sysctl_wunlock() routines are 82 * provided for the few places in the kernel which need to use that 83 * API rather than using the dynamic API. Use of the dynamic API is 84 * strongly encouraged for most code. 85 * 86 * The sysctlmemlock is used to limit the amount of user memory wired for 87 * sysctl requests. This is implemented by serializing any userland 88 * sysctl requests larger than a single page via an exclusive lock. 
 */
/* Sleepable read-mostly lock protecting the whole MIB tree. */
static struct rmlock sysctllock;
/* Serializes userland requests large enough to need wired memory. */
static struct sx __exclusive_cache_line sysctlmemlock;

/* Convenience wrappers around the rmlock protecting the MIB tree. */
#define	SYSCTL_WLOCK()		rm_wlock(&sysctllock)
#define	SYSCTL_WUNLOCK()	rm_wunlock(&sysctllock)
#define	SYSCTL_RLOCK(tracker)	rm_rlock(&sysctllock, (tracker))
#define	SYSCTL_RUNLOCK(tracker)	rm_runlock(&sysctllock, (tracker))
#define	SYSCTL_WLOCKED()	rm_wowned(&sysctllock)
#define	SYSCTL_ASSERT_LOCKED()	rm_assert(&sysctllock, RA_LOCKED)
#define	SYSCTL_ASSERT_WLOCKED()	rm_assert(&sysctllock, RA_WLOCKED)
#define	SYSCTL_ASSERT_RLOCKED()	rm_assert(&sysctllock, RA_RLOCKED)
#define	SYSCTL_INIT()		rm_init_flags(&sysctllock, "sysctl lock", \
				    RM_SLEEPABLE)
#define	SYSCTL_SLEEP(ch, wmesg, timo) \
				rm_sleep(ch, &sysctllock, 0, wmesg, timo)

static int sysctl_root(SYSCTL_HANDLER_ARGS);

/* Root list */
struct sysctl_oid_list sysctl__children = SLIST_HEAD_INITIALIZER(&sysctl__children);

static int	sysctl_remove_oid_locked(struct sysctl_oid *oidp, int del,
		    int recurse);
static int	sysctl_old_kernel(struct sysctl_req *, const void *, size_t);
static int	sysctl_new_kernel(struct sysctl_req *, void *, size_t);

/*
 * Look up a direct child oid of "list" by name.
 * Returns the matching oid or NULL; the sysctl lock (read or write)
 * must be held by the caller.
 */
static struct sysctl_oid *
sysctl_find_oidname(const char *name, struct sysctl_oid_list *list)
{
	struct sysctl_oid *oidp;

	SYSCTL_ASSERT_LOCKED();
	SLIST_FOREACH(oidp, list, oid_link) {
		if (strcmp(oidp->oid_name, name) == 0) {
			return (oidp);
		}
	}
	return (NULL);
}

/*
 * Initialization of the MIB tree.
 *
 * Order by number in each list.
 */
/*
 * Take/drop the sysctl writer lock for code that uses the static
 * sysctl_register_oid()/sysctl_unregister_oid() API directly instead
 * of the (preferred) dynamic API.
 */
void
sysctl_wlock(void)
{

	SYSCTL_WLOCK();
}

void
sysctl_wunlock(void)
{

	SYSCTL_WUNLOCK();
}

/*
 * Invoke an oid's handler with the sysctl lock dropped around the call.
 * Non-MPSAFE handlers are serialized under Giant.  For dynamic oids the
 * oid_running count is bumped for the duration so that
 * sysctl_remove_oid_locked() can wait for in-flight handlers to drain
 * (the CTLFLAG_DYING wakeup at the bottom pairs with that wait).
 */
static int
sysctl_root_handler_locked(struct sysctl_oid *oid, void *arg1, intmax_t arg2,
    struct sysctl_req *req, struct rm_priotracker *tracker)
{
	int error;

	if (oid->oid_kind & CTLFLAG_DYN)
		atomic_add_int(&oid->oid_running, 1);

	/* Drop whichever flavor of the sysctl lock the caller holds. */
	if (tracker != NULL)
		SYSCTL_RUNLOCK(tracker);
	else
		SYSCTL_WUNLOCK();

	if (!(oid->oid_kind & CTLFLAG_MPSAFE))
		mtx_lock(&Giant);
	error = oid->oid_handler(oid, arg1, arg2, req);
	if (!(oid->oid_kind & CTLFLAG_MPSAFE))
		mtx_unlock(&Giant);

	KFAIL_POINT_ERROR(_debug_fail_point, sysctl_running, error);

	/* Reacquire the same lock flavor before returning. */
	if (tracker != NULL)
		SYSCTL_RLOCK(tracker);
	else
		SYSCTL_WLOCK();

	if (oid->oid_kind & CTLFLAG_DYN) {
		if (atomic_fetchadd_int(&oid->oid_running, -1) == 1 &&
		    (oid->oid_kind & CTLFLAG_DYING) != 0)
			wakeup(&oid->oid_running);
	}

	return (error);
}

/*
 * Build the dotted name of "oidp" (composed right-to-left into path[])
 * and, if a matching kernel environment (tunable) value exists, feed it
 * through the oid's handler as if it were a sysctl write.
 * NOTE(review): several cases narrow a wider getenv result into a
 * smaller type (e.g. S32 via getenv_long) — out-of-range tunable values
 * are silently truncated.
 */
static void
sysctl_load_tunable_by_oid_locked(struct sysctl_oid *oidp)
{
	struct sysctl_req req;
	struct sysctl_oid *curr;
	char *penv = NULL;
	char path[64];
	ssize_t rem = sizeof(path);
	ssize_t len;
	uint8_t val_8;
	uint16_t val_16;
	uint32_t val_32;
	int val_int;
	long val_long;
	int64_t val_64;
	quad_t val_quad;
	int error;

	/* Reserve the terminating NUL; the name grows leftward from it. */
	path[--rem] = 0;

	for (curr = oidp; curr != NULL; curr = SYSCTL_PARENT(curr)) {
		len = strlen(curr->oid_name);
		rem -= len;
		if (curr != oidp)
			rem -= 1;	/* room for the '.' separator */
		if (rem < 0) {
			printf("OID path exceeds %d bytes\n", (int)sizeof(path));
			return;
		}
		memcpy(path + rem, curr->oid_name, len);
		if (curr != oidp)
			path[rem + len] = '.';
	}

	memset(&req, 0, sizeof(req));

	req.td = curthread;
	req.oldfunc = sysctl_old_kernel;
	req.newfunc = sysctl_new_kernel;
	req.lock = REQ_UNWIRED;

	switch (oidp->oid_kind & CTLTYPE) {
	case CTLTYPE_INT:
		if (getenv_int(path + rem, &val_int) == 0)
			return;
		req.newlen = sizeof(val_int);
		req.newptr = &val_int;
		break;
	case CTLTYPE_UINT:
		if (getenv_uint(path + rem, (unsigned int *)&val_int) == 0)
			return;
		req.newlen = sizeof(val_int);
		req.newptr = &val_int;
		break;
	case CTLTYPE_LONG:
		if (getenv_long(path + rem, &val_long) == 0)
			return;
		req.newlen = sizeof(val_long);
		req.newptr = &val_long;
		break;
	case CTLTYPE_ULONG:
		if (getenv_ulong(path + rem, (unsigned long *)&val_long) == 0)
			return;
		req.newlen = sizeof(val_long);
		req.newptr = &val_long;
		break;
	case CTLTYPE_S8:
		if (getenv_int(path + rem, &val_int) == 0)
			return;
		val_8 = val_int;
		req.newlen = sizeof(val_8);
		req.newptr = &val_8;
		break;
	case CTLTYPE_S16:
		if (getenv_int(path + rem, &val_int) == 0)
			return;
		val_16 = val_int;
		req.newlen = sizeof(val_16);
		req.newptr = &val_16;
		break;
	case CTLTYPE_S32:
		if (getenv_long(path + rem, &val_long) == 0)
			return;
		val_32 = val_long;
		req.newlen = sizeof(val_32);
		req.newptr = &val_32;
		break;
	case CTLTYPE_S64:
		if (getenv_quad(path + rem, &val_quad) == 0)
			return;
		val_64 = val_quad;
		req.newlen = sizeof(val_64);
		req.newptr = &val_64;
		break;
	case CTLTYPE_U8:
		if (getenv_uint(path + rem, (unsigned int *)&val_int) == 0)
			return;
		val_8 = val_int;
		req.newlen = sizeof(val_8);
		req.newptr = &val_8;
		break;
	case CTLTYPE_U16:
		if (getenv_uint(path + rem, (unsigned int *)&val_int) == 0)
			return;
		val_16 = val_int;
		req.newlen = sizeof(val_16);
		req.newptr = &val_16;
		break;
	case CTLTYPE_U32:
		if (getenv_ulong(path + rem, (unsigned long *)&val_long) == 0)
			return;
		val_32 = val_long;
		req.newlen = sizeof(val_32);
		req.newptr = &val_32;
		break;
	case CTLTYPE_U64:
		/* XXX there is no getenv_uquad() */
		if (getenv_quad(path + rem, &val_quad) == 0)
			return;
		val_64 = val_quad;
		req.newlen = sizeof(val_64);
		req.newptr = &val_64;
		break;
	case CTLTYPE_STRING:
		penv = kern_getenv(path + rem);
		if (penv == NULL)
			return;
		req.newlen = strlen(penv);
		req.newptr = penv;
		break;
	default:
		/* Opaque/node types cannot be seeded from a tunable. */
		return;
	}
	error = sysctl_root_handler_locked(oidp, oidp->oid_arg1,
	    oidp->oid_arg2, &req, NULL);
	if (error != 0)
		printf("Setting sysctl %s failed: %d\n", path + rem, error);
	if (penv != NULL)
		freeenv(penv);
}

/*
 * Register an oid under its parent, allocating an automatic OID number
 * when oid_number is negative (OID_AUTO) and keeping each sibling list
 * sorted by number.  Tunable-backed leaves fetch their initial value
 * from the kernel environment exactly once (CTLFLAG_NOFETCH is set).
 */
void
sysctl_register_oid(struct sysctl_oid *oidp)
{
	struct sysctl_oid_list *parent = oidp->oid_parent;
	struct sysctl_oid *p;
	struct sysctl_oid *q;
	int oid_number;
	int timeout = 2;

	/*
	 * First check if another oid with the same name already
	 * exists in the parent's list.
	 */
	SYSCTL_ASSERT_WLOCKED();
	p = sysctl_find_oidname(oidp->oid_name, parent);
	if (p != NULL) {
		if ((p->oid_kind & CTLTYPE) == CTLTYPE_NODE) {
			/* Nodes may be shared; just bump the refcount. */
			p->oid_refcnt++;
			return;
		} else {
			printf("can't re-use a leaf (%s)!\n", p->oid_name);
			return;
		}
	}
	/* get current OID number */
	oid_number = oidp->oid_number;

#if (OID_AUTO >= 0)
#error "OID_AUTO is expected to be a negative value"
#endif
	/*
	 * Any negative OID number qualifies as OID_AUTO. Valid OID
	 * numbers should always be positive.
	 *
	 * NOTE: DO NOT change the starting value here, change it in
	 * <sys/sysctl.h>, and make sure it is at least 256 to
	 * accommodate e.g. net.inet.raw as a static sysctl node.
	 */
	if (oid_number < 0) {
		static int newoid;

		/*
		 * By decrementing the next OID number we spend less
		 * time inserting the OIDs into a sorted list.
		 */
		if (--newoid < CTL_AUTO_START)
			newoid = 0x7fffffff;

		oid_number = newoid;
	}

	/*
	 * Insert the OID into the parent's list sorted by OID number.
	 */
retry:
	q = NULL;
	SLIST_FOREACH(p, parent, oid_link) {
		/* check if the current OID number is in use */
		if (oid_number == p->oid_number) {
			/* get the next valid OID number */
			if (oid_number < CTL_AUTO_START ||
			    oid_number == 0x7fffffff) {
				/* wraparound - restart */
				oid_number = CTL_AUTO_START;
				/* don't loop forever */
				if (!timeout--)
					panic("sysctl: Out of OID numbers\n");
				goto retry;
			} else {
				oid_number++;
			}
		} else if (oid_number < p->oid_number)
			break;
		q = p;
	}
	/* check for non-auto OID number collision */
	if (oidp->oid_number >= 0 && oidp->oid_number < CTL_AUTO_START &&
	    oid_number >= CTL_AUTO_START) {
		printf("sysctl: OID number(%d) is already in use for '%s'\n",
		    oidp->oid_number, oidp->oid_name);
	}
	/* update the OID number, if any */
	oidp->oid_number = oid_number;
	if (q != NULL)
		SLIST_INSERT_AFTER(q, oidp, oid_link);
	else
		SLIST_INSERT_HEAD(parent, oidp, oid_link);

	if ((oidp->oid_kind & CTLTYPE) != CTLTYPE_NODE &&
#ifdef VIMAGE
	    (oidp->oid_kind & CTLFLAG_VNET) == 0 &&
#endif
	    (oidp->oid_kind & CTLFLAG_TUN) != 0 &&
	    (oidp->oid_kind & CTLFLAG_NOFETCH) == 0) {
		/* only fetch value once */
		oidp->oid_kind |= CTLFLAG_NOFETCH;
		/* try to fetch value from kernel environment */
		sysctl_load_tunable_by_oid_locked(oidp);
	}
}

/*
 * Register an oid, marking a leaf dormant (invisible to lookups) until
 * sysctl_enable_oid() is called for it.
 */
void
sysctl_register_disabled_oid(struct sysctl_oid *oidp)
{

	/*
	 * Mark the leaf as dormant if it's not to be immediately enabled.
	 * We do not disable nodes as they can be shared between modules
	 * and it is always safe to access a node.
	 */
	KASSERT((oidp->oid_kind & CTLFLAG_DORMANT) == 0,
	    ("internal flag is set in oid_kind"));
	if ((oidp->oid_kind & CTLTYPE) != CTLTYPE_NODE)
		oidp->oid_kind |= CTLFLAG_DORMANT;
	sysctl_register_oid(oidp);
}

/*
 * Clear the dormant flag on a previously disabled leaf.
 * Requires the sysctl write lock; a no-op for nodes.
 */
void
sysctl_enable_oid(struct sysctl_oid *oidp)
{

	SYSCTL_ASSERT_WLOCKED();
	if ((oidp->oid_kind & CTLTYPE) == CTLTYPE_NODE) {
		KASSERT((oidp->oid_kind & CTLFLAG_DORMANT) == 0,
		    ("sysctl node is marked as dormant"));
		return;
	}
	KASSERT((oidp->oid_kind & CTLFLAG_DORMANT) != 0,
	    ("enabling already enabled sysctl oid"));
	oidp->oid_kind &= ~CTLFLAG_DORMANT;
}

/*
 * Unlink an oid from its parent's list.  Does not free anything;
 * see sysctl_remove_oid_locked() for teardown.
 */
void
sysctl_unregister_oid(struct sysctl_oid *oidp)
{
	struct sysctl_oid *p;
	int error;

	SYSCTL_ASSERT_WLOCKED();
	error = ENOENT;
	if (oidp->oid_number == OID_AUTO) {
		error = EINVAL;
	} else {
		SLIST_FOREACH(p, oidp->oid_parent, oid_link) {
			if (p == oidp) {
				SLIST_REMOVE(oidp->oid_parent, oidp,
				    sysctl_oid, oid_link);
				error = 0;
				break;
			}
		}
	}

	/*
	 * This can happen when a module fails to register and is
	 * being unloaded afterwards.  It should not be a panic()
	 * for normal use.
	 */
	if (error)
		printf("%s: failed to unregister sysctl\n", __func__);
}

/* Initialize a new context to keep track of dynamically added sysctls. */
int
sysctl_ctx_init(struct sysctl_ctx_list *c)
{

	if (c == NULL) {
		return (EINVAL);
	}

	/*
	 * No locking here, the caller is responsible for not adding
	 * new nodes to a context until after this function has
	 * returned.
	 */
	TAILQ_INIT(c);
	return (0);
}

/*
 * Free the context, and destroy all dynamic oids registered in this
 * context.  Returns EBUSY (and re-registers everything) if any oid in
 * the context cannot be removed.
 */
int
sysctl_ctx_free(struct sysctl_ctx_list *clist)
{
	struct sysctl_ctx_entry *e, *e1;
	int error;

	error = 0;
	/*
	 * First perform a "dry run" to check if it's ok to remove oids.
	 * XXX FIXME
	 * XXX This algorithm is a hack. But I don't know any
	 * XXX better solution for now...
	 */
	SYSCTL_WLOCK();
	TAILQ_FOREACH(e, clist, link) {
		error = sysctl_remove_oid_locked(e->entry, 0, 0);
		if (error)
			break;
	}
	/*
	 * Restore deregistered entries, either from the end,
	 * or from the place where error occurred.
	 * e contains the entry that was not unregistered
	 */
	if (error)
		e1 = TAILQ_PREV(e, sysctl_ctx_list, link);
	else
		e1 = TAILQ_LAST(clist, sysctl_ctx_list);
	while (e1 != NULL) {
		sysctl_register_oid(e1->entry);
		e1 = TAILQ_PREV(e1, sysctl_ctx_list, link);
	}
	if (error) {
		SYSCTL_WUNLOCK();
		return(EBUSY);
	}
	/* Now really delete the entries */
	e = TAILQ_FIRST(clist);
	while (e != NULL) {
		e1 = TAILQ_NEXT(e, link);
		error = sysctl_remove_oid_locked(e->entry, 1, 0);
		if (error)
			panic("sysctl_remove_oid: corrupt tree, entry: %s",
			    e->entry->oid_name);
		free(e, M_SYSCTLOID);
		e = e1;
	}
	SYSCTL_WUNLOCK();
	return (error);
}

/* Add an entry to the context.  Requires the sysctl write lock. */
struct sysctl_ctx_entry *
sysctl_ctx_entry_add(struct sysctl_ctx_list *clist, struct sysctl_oid *oidp)
{
	struct sysctl_ctx_entry *e;

	SYSCTL_ASSERT_WLOCKED();
	if (clist == NULL || oidp == NULL)
		return(NULL);
	e = malloc(sizeof(struct sysctl_ctx_entry), M_SYSCTLOID, M_WAITOK);
	e->entry = oidp;
	TAILQ_INSERT_HEAD(clist, e, link);
	return (e);
}

/* Find an entry in the context; returns NULL when absent. */
struct sysctl_ctx_entry *
sysctl_ctx_entry_find(struct sysctl_ctx_list *clist, struct sysctl_oid *oidp)
{
	struct sysctl_ctx_entry *e;

	SYSCTL_ASSERT_WLOCKED();
	if (clist == NULL || oidp == NULL)
		return(NULL);
	TAILQ_FOREACH(e, clist, link) {
		if(e->entry == oidp)
			return(e);
	}
	/* TAILQ_FOREACH leaves e == NULL here when nothing matched. */
	return (e);
}

/*
 * Delete an entry from the context.
 * NOTE: this function doesn't free oidp!
 * You have to remove it
 * with sysctl_remove_oid().
 */
int
sysctl_ctx_entry_del(struct sysctl_ctx_list *clist, struct sysctl_oid *oidp)
{
	struct sysctl_ctx_entry *e;

	if (clist == NULL || oidp == NULL)
		return (EINVAL);
	SYSCTL_WLOCK();
	e = sysctl_ctx_entry_find(clist, oidp);
	if (e != NULL) {
		TAILQ_REMOVE(clist, e, link);
		SYSCTL_WUNLOCK();
		free(e, M_SYSCTLOID);
		return (0);
	} else {
		SYSCTL_WUNLOCK();
		return (ENOENT);
	}
}

/*
 * Remove dynamically created sysctl trees.
 * oidp - top of the tree to be removed
 * del - if 0 - just deregister, otherwise free up entries as well
 * recurse - if != 0 traverse the subtree to be deleted
 */
int
sysctl_remove_oid(struct sysctl_oid *oidp, int del, int recurse)
{
	int error;

	SYSCTL_WLOCK();
	error = sysctl_remove_oid_locked(oidp, del, recurse);
	SYSCTL_WUNLOCK();
	return (error);
}

/*
 * Look up a child of "parent" by name and remove it; same del/recurse
 * semantics as sysctl_remove_oid().  Returns ENOENT if no child
 * matches.
 */
int
sysctl_remove_name(struct sysctl_oid *parent, const char *name,
    int del, int recurse)
{
	struct sysctl_oid *p, *tmp;
	int error;

	error = ENOENT;
	SYSCTL_WLOCK();
	SLIST_FOREACH_SAFE(p, SYSCTL_CHILDREN(parent), oid_link, tmp) {
		if (strcmp(p->oid_name, name) == 0) {
			error = sysctl_remove_oid_locked(p, del, recurse);
			break;
		}
	}
	SYSCTL_WUNLOCK();

	return (error);
}


/*
 * Guts of oid removal.  Only dynamic oids may be removed; a node with
 * children is refused unless "recurse" is set.  With "del" set the oid
 * is freed, after waiting for any in-flight handler invocations to
 * drain (paired with the wakeup in sysctl_root_handler_locked()).
 */
static int
sysctl_remove_oid_locked(struct sysctl_oid *oidp, int del, int recurse)
{
	struct sysctl_oid *p, *tmp;
	int error;

	SYSCTL_ASSERT_WLOCKED();
	if (oidp == NULL)
		return(EINVAL);
	if ((oidp->oid_kind & CTLFLAG_DYN) == 0) {
		printf("Warning: can't remove non-dynamic nodes (%s)!\n",
		    oidp->oid_name);
		return (EINVAL);
	}
	/*
	 * WARNING: normal method to do this should be through
	 * sysctl_ctx_free(). Use recursing as the last resort
	 * method to purge your sysctl tree of leftovers...
	 * However, if some other code still references these nodes,
	 * it will panic.
	 */
	if ((oidp->oid_kind & CTLTYPE) == CTLTYPE_NODE) {
		if (oidp->oid_refcnt == 1) {
			SLIST_FOREACH_SAFE(p,
			    SYSCTL_CHILDREN(oidp), oid_link, tmp) {
				if (!recurse) {
					printf("Warning: failed attempt to "
					    "remove oid %s with child %s\n",
					    oidp->oid_name, p->oid_name);
					return (ENOTEMPTY);
				}
				error = sysctl_remove_oid_locked(p, del,
				    recurse);
				if (error)
					return (error);
			}
		}
	}
	if (oidp->oid_refcnt > 1 ) {
		/* Shared node: just drop one reference. */
		oidp->oid_refcnt--;
	} else {
		if (oidp->oid_refcnt == 0) {
			printf("Warning: bad oid_refcnt=%u (%s)!\n",
			    oidp->oid_refcnt, oidp->oid_name);
			return (EINVAL);
		}
		sysctl_unregister_oid(oidp);
		if (del) {
			/*
			 * Wait for all threads running the handler to drain.
			 * This preserves the previous behavior when the
			 * sysctl lock was held across a handler invocation,
			 * and is necessary for module unload correctness.
			 */
			while (oidp->oid_running > 0) {
				oidp->oid_kind |= CTLFLAG_DYING;
				SYSCTL_SLEEP(&oidp->oid_running, "oidrm", 0);
			}
			if (oidp->oid_descr)
				free(__DECONST(char *, oidp->oid_descr),
				    M_SYSCTLOID);
			free(__DECONST(char *, oidp->oid_name), M_SYSCTLOID);
			free(oidp, M_SYSCTLOID);
		}
	}
	return (0);
}

/*
 * Create new sysctls at run time.
 * clist may point to a valid context initialized with sysctl_ctx_init().
 * Returns the new (or existing, for a re-used node) oid, or NULL on
 * failure (no parent, or name collides with an existing leaf).
 */
struct sysctl_oid *
sysctl_add_oid(struct sysctl_ctx_list *clist, struct sysctl_oid_list *parent,
    int number, const char *name, int kind, void *arg1, intmax_t arg2,
    int (*handler)(SYSCTL_HANDLER_ARGS), const char *fmt, const char *descr)
{
	struct sysctl_oid *oidp;

	/* You have to hook up somewhere.. */
	if (parent == NULL)
		return(NULL);
	/* Check if the node already exists, otherwise create it */
	SYSCTL_WLOCK();
	oidp = sysctl_find_oidname(name, parent);
	if (oidp != NULL) {
		if ((oidp->oid_kind & CTLTYPE) == CTLTYPE_NODE) {
			oidp->oid_refcnt++;
			/* Update the context */
			if (clist != NULL)
				sysctl_ctx_entry_add(clist, oidp);
			SYSCTL_WUNLOCK();
			return (oidp);
		} else {
			SYSCTL_WUNLOCK();
			printf("can't re-use a leaf (%s)!\n", name);
			return (NULL);
		}
	}
	oidp = malloc(sizeof(struct sysctl_oid), M_SYSCTLOID, M_WAITOK|M_ZERO);
	oidp->oid_parent = parent;
	SLIST_INIT(&oidp->oid_children);
	oidp->oid_number = number;
	oidp->oid_refcnt = 1;
	oidp->oid_name = strdup(name, M_SYSCTLOID);
	oidp->oid_handler = handler;
	oidp->oid_kind = CTLFLAG_DYN | kind;
	oidp->oid_arg1 = arg1;
	oidp->oid_arg2 = arg2;
	oidp->oid_fmt = fmt;
	if (descr != NULL)
		oidp->oid_descr = strdup(descr, M_SYSCTLOID);
	/* Update the context, if used */
	if (clist != NULL)
		sysctl_ctx_entry_add(clist, oidp);
	/* Register this oid */
	sysctl_register_oid(oidp);
	SYSCTL_WUNLOCK();
	return (oidp);
}

/*
 * Rename an existing oid.
 * The old name is freed only after the tree is unlocked, so lookups
 * never see a stale pointer.
 */
void
sysctl_rename_oid(struct sysctl_oid *oidp, const char *name)
{
	char *newname;
	char *oldname;

	newname = strdup(name, M_SYSCTLOID);
	SYSCTL_WLOCK();
	oldname = __DECONST(char *, oidp->oid_name);
	oidp->oid_name = newname;
	SYSCTL_WUNLOCK();
	free(oldname, M_SYSCTLOID);
}

/*
 * Reparent an existing oid.
 */
int
sysctl_move_oid(struct sysctl_oid *oid, struct sysctl_oid_list *parent)
{
	struct sysctl_oid *oidp;

	SYSCTL_WLOCK();
	if (oid->oid_parent == parent) {
		/* Already where it should be; nothing to do. */
		SYSCTL_WUNLOCK();
		return (0);
	}
	oidp = sysctl_find_oidname(oid->oid_name, parent);
	if (oidp != NULL) {
		/* Destination already has an entry of that name. */
		SYSCTL_WUNLOCK();
		return (EEXIST);
	}
	/* Re-insert under the new parent with a fresh automatic number. */
	sysctl_unregister_oid(oid);
	oid->oid_parent = parent;
	oid->oid_number = OID_AUTO;
	sysctl_register_oid(oid);
	SYSCTL_WUNLOCK();
	return (0);
}

/*
 * Register the kernel's oids on startup.
 */
SET_DECLARE(sysctl_set, struct sysctl_oid);

/*
 * SYSINIT hook: initialize both sysctl locks, then register every
 * statically compiled-in oid from the sysctl_set linker set.
 */
static void
sysctl_register_all(void *arg)
{
	struct sysctl_oid **oidp;

	sx_init(&sysctlmemlock, "sysctl mem");
	SYSCTL_INIT();
	SYSCTL_WLOCK();
	SET_FOREACH(oidp, sysctl_set)
		sysctl_register_oid(*oidp);
	SYSCTL_WUNLOCK();
}
SYSINIT(sysctl, SI_SUB_KMEM, SI_ORDER_FIRST, sysctl_register_all, 0);

/*
 * "Staff-functions"
 *
 * These functions implement a presently undocumented interface
 * used by the sysctl program to walk the tree, and get the type
 * so it can print the value.
 * This interface is under work and consideration, and should probably
 * be killed with a big axe by the first person who can find the time.
 * (be aware though, that the proper interface isn't as obvious as it
 * may seem, there are various conflicting requirements.
 *
 * {0,0}	printf the entire MIB-tree.
 * {0,1,...}	return the name of the "..." OID.
 * {0,2,...}	return the next OID.
 * {0,3}	return the OID of the name in "new"
 * {0,4,...}	return the kind & format info for the "..." OID.
 * {0,5,...}	return the description the "..." OID.
 */

#ifdef SYSCTL_DEBUG
/*
 * Recursively dump the MIB tree rooted at "l" to the console,
 * indenting "i" spaces per level.  Sysctl lock must be held.
 */
static void
sysctl_sysctl_debug_dump_node(struct sysctl_oid_list *l, int i)
{
	int k;
	struct sysctl_oid *oidp;

	SYSCTL_ASSERT_LOCKED();
	SLIST_FOREACH(oidp, l, oid_link) {

		for (k=0; k<i; k++)
			printf(" ");

		printf("%d %s ", oidp->oid_number, oidp->oid_name);

		printf("%c%c",
		    oidp->oid_kind & CTLFLAG_RD ? 'R':' ',
		    oidp->oid_kind & CTLFLAG_WR ? 'W':' ');

		if (oidp->oid_handler)
			printf(" *Handler");

		switch (oidp->oid_kind & CTLTYPE) {
		case CTLTYPE_NODE:
			printf(" Node\n");
			/* Only recurse into handler-less (pure) nodes. */
			if (!oidp->oid_handler) {
				sysctl_sysctl_debug_dump_node(
				    SYSCTL_CHILDREN(oidp), i + 2);
			}
			break;
		case CTLTYPE_INT:    printf(" Int\n"); break;
		case CTLTYPE_UINT:   printf(" u_int\n"); break;
		case CTLTYPE_LONG:   printf(" Long\n"); break;
		case CTLTYPE_ULONG:  printf(" u_long\n"); break;
		case CTLTYPE_STRING: printf(" String\n"); break;
		case CTLTYPE_S8:     printf(" int8_t\n"); break;
		case CTLTYPE_S16:    printf(" int16_t\n"); break;
		case CTLTYPE_S32:    printf(" int32_t\n"); break;
		case CTLTYPE_S64:    printf(" int64_t\n"); break;
		case CTLTYPE_U8:     printf(" uint8_t\n"); break;
		case CTLTYPE_U16:    printf(" uint16_t\n"); break;
		case CTLTYPE_U32:    printf(" uint32_t\n"); break;
		case CTLTYPE_U64:    printf(" uint64_t\n"); break;
		case CTLTYPE_OPAQUE: printf(" Opaque/struct\n"); break;
		default:	     printf("\n");
		}

	}
}

/*
 * Handler for sysctl.debug: privileged console dump of the whole tree.
 * Always returns ENOENT since it produces no "old" data.
 */
static int
sysctl_sysctl_debug(SYSCTL_HANDLER_ARGS)
{
	struct rm_priotracker tracker;
	int error;

	error = priv_check(req->td, PRIV_SYSCTL_DEBUG);
	if (error)
		return (error);
	SYSCTL_RLOCK(&tracker);
	sysctl_sysctl_debug_dump_node(&sysctl__children, 0);
	SYSCTL_RUNLOCK(&tracker);
	return (ENOENT);
}

SYSCTL_PROC(_sysctl, 0, debug, CTLTYPE_STRING|CTLFLAG_RD|CTLFLAG_MPSAFE,
    0, 0, sysctl_sysctl_debug, "-", "");
#endif

/*
 * Handler for sysctl.name: translate the numeric OID in arg1/arg2 into
 * its dotted-name form.  Unknown path components are emitted as their
 * decimal number.
 */
static int
sysctl_sysctl_name(SYSCTL_HANDLER_ARGS)
{
	int *name = (int *) arg1;
	u_int namelen = arg2;
	int error = 0;
	struct sysctl_oid *oid;
	struct sysctl_oid_list *lsp = &sysctl__children, *lsp2;
	struct rm_priotracker tracker;
	/*
	 * NOTE(review): buf[10] cannot hold "-2147483648" (12 bytes with
	 * NUL); snprintf truncates such components rather than overflow.
	 * Verify whether negative components can reach here.
	 */
	char buf[10];

	SYSCTL_RLOCK(&tracker);
	while (namelen) {
		if (!lsp) {
			/* Past the known tree: print the raw number. */
			snprintf(buf,sizeof(buf),"%d",*name);
			if (req->oldidx)
				error = SYSCTL_OUT(req, ".", 1);
			if (!error)
				error = SYSCTL_OUT(req, buf, strlen(buf));
			if (error)
				goto out;
			namelen--;
			name++;
			continue;
		}
		lsp2 = NULL;
		SLIST_FOREACH(oid, lsp, oid_link) {
			if (oid->oid_number != *name)
				continue;

			if (req->oldidx)
				error = SYSCTL_OUT(req, ".", 1);
			if (!error)
				error = SYSCTL_OUT(req, oid->oid_name,
				    strlen(oid->oid_name));
			if (error)
				goto out;

			namelen--;
			name++;

			/* Descend only through handler-less nodes. */
			if ((oid->oid_kind & CTLTYPE) != CTLTYPE_NODE)
				break;

			if (oid->oid_handler)
				break;

			lsp2 = SYSCTL_CHILDREN(oid);
			break;
		}
		lsp = lsp2;
	}
	error = SYSCTL_OUT(req, "", 1);
 out:
	SYSCTL_RUNLOCK(&tracker);
	return (error);
}

/*
 * XXXRW/JA: Shouldn't return name data for nodes that we don't permit in
 * capability mode.
 */
static SYSCTL_NODE(_sysctl, 1, name, CTLFLAG_RD | CTLFLAG_MPSAFE | CTLFLAG_CAPRD,
    sysctl_sysctl_name, "");

/*
 * Recursive worker for sysctl.next: find the OID that sorts immediately
 * after name[0..namelen-1] under "lsp", writing the resulting path into
 * next[] and its depth into *len.  Returns 0 on success, 1 when the
 * subtree is exhausted.  Skips CTLFLAG_SKIP and dormant oids.
 */
static int
sysctl_sysctl_next_ls(struct sysctl_oid_list *lsp, int *name, u_int namelen,
    int *next, int *len, int level, struct sysctl_oid **oidpp)
{
	struct sysctl_oid *oidp;

	SYSCTL_ASSERT_LOCKED();
	*len = level;
	SLIST_FOREACH(oidp, lsp, oid_link) {
		*next = oidp->oid_number;
		*oidpp = oidp;

		if ((oidp->oid_kind & (CTLFLAG_SKIP | CTLFLAG_DORMANT)) != 0)
			continue;

		if (!namelen) {
			/* No more input name: first eligible entry wins. */
			if ((oidp->oid_kind & CTLTYPE) != CTLTYPE_NODE)
				return (0);
			if (oidp->oid_handler)
				/* We really should call the handler here...*/
				return (0);
			lsp = SYSCTL_CHILDREN(oidp);
			if (!sysctl_sysctl_next_ls(lsp, 0, 0, next+1,
			    len, level+1, oidpp))
				return (0);
			goto emptynode;
		}

		if (oidp->oid_number < *name)
			continue;

		if (oidp->oid_number > *name) {
			/* Stepped past the input: this is the successor. */
			if ((oidp->oid_kind & CTLTYPE) != CTLTYPE_NODE)
				return (0);
			if (oidp->oid_handler)
				return (0);
			lsp = SYSCTL_CHILDREN(oidp);
			if (!sysctl_sysctl_next_ls(lsp, name+1, namelen-1,
			    next+1, len, level+1, oidpp))
				return (0);
			goto next;
		}
		if ((oidp->oid_kind & CTLTYPE) != CTLTYPE_NODE)
			continue;

		if (oidp->oid_handler)
			continue;

		/* Exact match on a pure node: descend. */
		lsp = SYSCTL_CHILDREN(oidp);
		if (!sysctl_sysctl_next_ls(lsp, name+1, namelen-1, next+1,
		    len, level+1, oidpp))
			return (0);
	next:
		/* Subtree exhausted: treat the rest as a bare number. */
		namelen = 1;
	emptynode:
		*len = level;
	}
	return (1);
}

/*
 * Handler for sysctl.next: return the OID following the one given,
 * in tree order.  ENOENT once the tree is exhausted.
 */
static int
sysctl_sysctl_next(SYSCTL_HANDLER_ARGS)
{
	int *name = (int *) arg1;
	u_int namelen = arg2;
	int i, j, error;
	struct sysctl_oid *oid;
	struct sysctl_oid_list *lsp = &sysctl__children;
	struct rm_priotracker tracker;
	int newoid[CTL_MAXNAME];

	SYSCTL_RLOCK(&tracker);
	i = sysctl_sysctl_next_ls(lsp, name, namelen, newoid, &j, 1, &oid);
	SYSCTL_RUNLOCK(&tracker);
	if (i)
		return (ENOENT);
	error = SYSCTL_OUT(req, newoid, j * sizeof (int));
	return (error);
}

/*
 * XXXRW/JA: Shouldn't return next data for nodes that we don't permit in
 * capability mode.
 */
static SYSCTL_NODE(_sysctl, 2, next, CTLFLAG_RD | CTLFLAG_MPSAFE | CTLFLAG_CAPRD,
    sysctl_sysctl_next, "");

/*
 * Translate a dotted name (modified in place by strsep) into its
 * numeric OID path.  On success the terminal oid is returned through
 * *oidpp (if non-NULL).  Returns ENOENT for unknown components or when
 * the name descends through a leaf.
 */
static int
name2oid(char *name, int *oid, int *len, struct sysctl_oid **oidpp)
{
	struct sysctl_oid *oidp;
	struct sysctl_oid_list *lsp = &sysctl__children;
	char *p;

	SYSCTL_ASSERT_LOCKED();

	for (*len = 0; *len < CTL_MAXNAME;) {
		p = strsep(&name, ".");

		oidp = SLIST_FIRST(lsp);
		for (;; oidp = SLIST_NEXT(oidp, oid_link)) {
			if (oidp == NULL)
				return (ENOENT);
			if (strcmp(p, oidp->oid_name) == 0)
				break;
		}
		*oid++ = oidp->oid_number;
		(*len)++;

		if (name == NULL || *name == '\0') {
			if (oidpp)
				*oidpp = oidp;
			return (0);
		}

		/* More components remain: only pure nodes may be entered. */
		if ((oidp->oid_kind & CTLTYPE) != CTLTYPE_NODE)
			break;

		if (oidp->oid_handler)
			break;

		lsp = SYSCTL_CHILDREN(oidp);
	}
	return (ENOENT);
}

/*
 * Handler for sysctl.name2oid: "new" carries the dotted name, "old"
 * receives the numeric OID array.
 */
static int
sysctl_sysctl_name2oid(SYSCTL_HANDLER_ARGS)
{
	char *p;
	int error, oid[CTL_MAXNAME], len = 0;
	struct sysctl_oid *op = NULL;
	struct rm_priotracker tracker;

	if (!req->newlen)
		return (ENOENT);
	if (req->newlen >= MAXPATHLEN)	/* XXX arbitrary, undocumented */
		return (ENAMETOOLONG);

	p = malloc(req->newlen+1, M_SYSCTL, M_WAITOK);

	error = SYSCTL_IN(req, p, req->newlen);
	if (error) {
		free(p, M_SYSCTL);
		return (error);
	}

	p [req->newlen] = '\0';

	SYSCTL_RLOCK(&tracker);
	error = name2oid(p, oid, &len, &op);
	SYSCTL_RUNLOCK(&tracker);

	free(p, M_SYSCTL);

	if (error)
		return (error);

	error = SYSCTL_OUT(req, oid, len * sizeof *oid);
	return (error);
}

/*
 * XXXRW/JA: Shouldn't return name2oid data for nodes that we don't permit in
 * capability mode.
 */
SYSCTL_PROC(_sysctl, 3, name2oid,
    CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_ANYBODY | CTLFLAG_MPSAFE
    | CTLFLAG_CAPRW, 0, 0, sysctl_sysctl_name2oid, "I", "");

/*
 * Handler for sysctl.oidfmt: return the kind flags followed by the
 * NUL-terminated format string of the named oid.
 */
static int
sysctl_sysctl_oidfmt(SYSCTL_HANDLER_ARGS)
{
	struct sysctl_oid *oid;
	struct rm_priotracker tracker;
	int error;

	SYSCTL_RLOCK(&tracker);
	error = sysctl_find_oid(arg1, arg2, &oid, NULL, req);
	if (error)
		goto out;

	if (oid->oid_fmt == NULL) {
		error = ENOENT;
		goto out;
	}
	error = SYSCTL_OUT(req, &oid->oid_kind, sizeof(oid->oid_kind));
	if (error)
		goto out;
	error = SYSCTL_OUT(req, oid->oid_fmt, strlen(oid->oid_fmt) + 1);
 out:
	SYSCTL_RUNLOCK(&tracker);
	return (error);
}


static SYSCTL_NODE(_sysctl, 4, oidfmt, CTLFLAG_RD|CTLFLAG_MPSAFE|CTLFLAG_CAPRD,
    sysctl_sysctl_oidfmt, "");

/*
 * Handler for sysctl.oiddescr: return the NUL-terminated description
 * string of the named oid, or ENOENT when it has none.
 */
static int
sysctl_sysctl_oiddescr(SYSCTL_HANDLER_ARGS)
{
	struct sysctl_oid *oid;
	struct rm_priotracker tracker;
	int error;

	SYSCTL_RLOCK(&tracker);
	error = sysctl_find_oid(arg1, arg2, &oid, NULL, req);
	if (error)
		goto out;

	if (oid->oid_descr == NULL) {
		error = ENOENT;
		goto out;
	}
	error = SYSCTL_OUT(req, oid->oid_descr, strlen(oid->oid_descr) + 1);
 out:
	SYSCTL_RUNLOCK(&tracker);
	return (error);
}

static SYSCTL_NODE(_sysctl, 5, oiddescr, CTLFLAG_RD|CTLFLAG_MPSAFE|CTLFLAG_CAPRD,
    sysctl_sysctl_oiddescr, "");

/*
 * Default "handler" functions.
 */

/*
 * Handle a bool.
 * Two cases:
 * a variable:  point arg1 at it.
 * a constant:  pass it in arg2.
 */

int
sysctl_handle_bool(SYSCTL_HANDLER_ARGS)
{
	uint8_t temp;
	int error;

	/*
	 * Attempt to get a coherent snapshot by making a copy of the data.
	 */
	if (arg1)
		temp = *(bool *)arg1 ? 1 : 0;
	else
		temp = arg2 ? 1 : 0;

	error = SYSCTL_OUT(req, &temp, sizeof(temp));
	if (error || !req->newptr)
		return (error);

	if (!arg1)
		error = EPERM;	/* a constant cannot be written */
	else {
		/* Normalize any non-zero new value to exactly 1. */
		error = SYSCTL_IN(req, &temp, sizeof(temp));
		if (!error)
			*(bool *)arg1 = temp ? 1 : 0;
	}
	return (error);
}

/*
 * Handle an int8_t, signed or unsigned.
 * Two cases:
 *     a variable:  point arg1 at it.
 *     a constant:  pass it in arg2.
 */

int
sysctl_handle_8(SYSCTL_HANDLER_ARGS)
{
	int8_t tmpout;
	int error = 0;

	/*
	 * Attempt to get a coherent snapshot by making a copy of the data.
	 */
	if (arg1)
		tmpout = *(int8_t *)arg1;
	else
		tmpout = arg2;
	error = SYSCTL_OUT(req, &tmpout, sizeof(tmpout));

	if (error || !req->newptr)
		return (error);

	if (!arg1)
		error = EPERM;	/* a constant cannot be written */
	else
		error = SYSCTL_IN(req, arg1, sizeof(tmpout));
	return (error);
}

/*
 * Handle an int16_t, signed or unsigned.
 * Two cases:
 *     a variable:  point arg1 at it.
 *     a constant:  pass it in arg2.
 */

int
sysctl_handle_16(SYSCTL_HANDLER_ARGS)
{
	int16_t tmpout;
	int error = 0;

	/*
	 * Attempt to get a coherent snapshot by making a copy of the data.
	 */
	if (arg1)
		tmpout = *(int16_t *)arg1;
	else
		tmpout = arg2;
	error = SYSCTL_OUT(req, &tmpout, sizeof(tmpout));

	if (error || !req->newptr)
		return (error);

	if (!arg1)
		error = EPERM;	/* a constant cannot be written */
	else
		error = SYSCTL_IN(req, arg1, sizeof(tmpout));
	return (error);
}

/*
 * Handle an int32_t, signed or unsigned.
 * Two cases:
 *     a variable:  point arg1 at it.
 *     a constant:  pass it in arg2.
 */

int
sysctl_handle_32(SYSCTL_HANDLER_ARGS)
{
	int32_t tmpout;
	int error = 0;

	/*
	 * Attempt to get a coherent snapshot by making a copy of the data.
	 */
	if (arg1)
		tmpout = *(int32_t *)arg1;
	else
		tmpout = arg2;
	error = SYSCTL_OUT(req, &tmpout, sizeof(tmpout));

	if (error || !req->newptr)
		return (error);

	if (!arg1)
		error = EPERM;	/* a constant cannot be written */
	else
		error = SYSCTL_IN(req, arg1, sizeof(tmpout));
	return (error);
}

/*
 * Handle an int, signed or unsigned.
 * Two cases:
 *     a variable:  point arg1 at it.
 *     a constant:  pass it in arg2.
 */

int
sysctl_handle_int(SYSCTL_HANDLER_ARGS)
{
	int tmpout, error = 0;

	/*
	 * Attempt to get a coherent snapshot by making a copy of the data.
	 */
	if (arg1)
		tmpout = *(int *)arg1;
	else
		tmpout = arg2;
	error = SYSCTL_OUT(req, &tmpout, sizeof(int));

	if (error || !req->newptr)
		return (error);

	if (!arg1)
		error = EPERM;	/* a constant cannot be written */
	else
		error = SYSCTL_IN(req, arg1, sizeof(int));
	return (error);
}

/*
 * Based on sysctl_handle_int(): convert a tick count stored in *arg1 to
 * milliseconds for userland, and the new value back to ticks.
 * Note: this is used by TCP.
 */

int
sysctl_msec_to_ticks(SYSCTL_HANDLER_ARGS)
{
	int error, s, tt;

	/* Export the tick count as milliseconds. */
	tt = *(int *)arg1;
	s = (int)((int64_t)tt * 1000 / hz);

	error = sysctl_handle_int(oidp, &s, 0, req);
	if (error || !req->newptr)
		return (error);

	/* Convert the new value back; refuse anything below one tick. */
	tt = (int)((int64_t)s * hz / 1000);
	if (tt < 1)
		return (EINVAL);

	*(int *)arg1 = tt;
	return (0);
}


/*
 * Handle a long, signed or unsigned.
 * Two cases:
 *     a variable:  point arg1 at it.
 *     a constant:  pass it in arg2.
 */

int
sysctl_handle_long(SYSCTL_HANDLER_ARGS)
{
	int error = 0;
	long tmplong;
#ifdef SCTL_MASK32
	int tmpint;
#endif

	/*
	 * Attempt to get a coherent snapshot by making a copy of the data.
	 */
	if (arg1)
		tmplong = *(long *)arg1;
	else
		tmplong = arg2;
#ifdef SCTL_MASK32
	/* Requests flagged SCTL_MASK32 transfer a 32-bit int, not a long. */
	if (req->flags & SCTL_MASK32) {
		tmpint = tmplong;
		error = SYSCTL_OUT(req, &tmpint, sizeof(int));
	} else
#endif
		error = SYSCTL_OUT(req, &tmplong, sizeof(long));

	if (error || !req->newptr)
		return (error);

	if (!arg1)
		error = EPERM;	/* a constant cannot be written */
#ifdef SCTL_MASK32
	else if (req->flags & SCTL_MASK32) {
		error = SYSCTL_IN(req, &tmpint, sizeof(int));
		*(long *)arg1 = (long)tmpint;
	}
#endif
	else
		error = SYSCTL_IN(req, arg1, sizeof(long));
	return (error);
}

/*
 * Handle a 64 bit int, signed or unsigned.
 * Two cases:
 *     a variable:  point arg1 at it.
 *     a constant:  pass it in arg2.
 */
int
sysctl_handle_64(SYSCTL_HANDLER_ARGS)
{
	int error = 0;
	uint64_t tmpout;

	/*
	 * Attempt to get a coherent snapshot by making a copy of the data.
	 */
	if (arg1)
		tmpout = *(uint64_t *)arg1;
	else
		tmpout = arg2;
	error = SYSCTL_OUT(req, &tmpout, sizeof(uint64_t));

	if (error || !req->newptr)
		return (error);

	if (!arg1)
		error = EPERM;	/* a constant cannot be written */
	else
		error = SYSCTL_IN(req, arg1, sizeof(uint64_t));
	return (error);
}

/*
 * Handle our generic '\0' terminated 'C' string.
 * Two cases:
 *     a variable string:  point arg1 at it, arg2 is max length.
 *     a constant string:  point arg1 at it, arg2 is zero.
 */

int
sysctl_handle_string(SYSCTL_HANDLER_ARGS)
{
	size_t outlen;
	int error = 0, ro_string = 0;

	/*
	 * A zero-length buffer indicates a fixed size read-only
	 * string:
	 */
	if (arg2 == 0) {
		arg2 = strlen((char *)arg1) + 1;
		ro_string = 1;
	}

	if (req->oldptr != NULL) {
		char *tmparg;

		if (ro_string) {
			/* A read-only string cannot change underneath us. */
			tmparg = arg1;
		} else {
			/* try to make a coherent snapshot of the string */
			tmparg = malloc(arg2, M_SYSCTLTMP, M_WAITOK);
			memcpy(tmparg, arg1, arg2);
		}

		outlen = strnlen(tmparg, arg2 - 1) + 1;
		error = SYSCTL_OUT(req, tmparg, outlen);

		if (!ro_string)
			free(tmparg, M_SYSCTLTMP);
	} else {
		/* Size-only probe: report the length without copying. */
		outlen = strnlen((char *)arg1, arg2 - 1) + 1;
		error = SYSCTL_OUT(req, NULL, outlen);
	}
	if (error || !req->newptr)
		return (error);

	/* The new string must fit, leaving room for the terminating NUL. */
	if ((req->newlen - req->newidx) >= arg2) {
		error = EINVAL;
	} else {
		arg2 = (req->newlen - req->newidx);
		error = SYSCTL_IN(req, arg1, arg2);
		((char *)arg1)[arg2] = '\0';
	}
	return (error);
}

/*
 * Handle any kind of opaque data.
 * arg1 points to it, arg2 is the size.
 */

int
sysctl_handle_opaque(SYSCTL_HANDLER_ARGS)
{
	int error, tries;
	u_int generation;
	struct sysctl_req req2;

	/*
	 * Attempt to get a coherent snapshot, by using the thread
	 * pre-emption counter updated from within mi_switch() to
	 * determine if we were pre-empted during a bcopy() or
	 * copyout().  Make 3 attempts at doing this before giving up.
	 * If we encounter an error, stop immediately.
	 */
	tries = 0;
	req2 = *req;
retry:
	generation = curthread->td_generation;
	error = SYSCTL_OUT(req, arg1, arg2);
	if (error)
		return (error);
	tries++;
	/* Preempted mid-copy: reset the request state and try again. */
	if (generation != curthread->td_generation && tries < 3) {
		*req = req2;
		goto retry;
	}

	error = SYSCTL_IN(req, arg1, arg2);

	return (error);
}

/*
 * Transfer functions to/from kernel space.
 * XXX: rather untested at this point
 */
static int
sysctl_old_kernel(struct sysctl_req *req, const void *p, size_t l)
{
	size_t i = 0;

	if (req->oldptr) {
		/* Clamp the copy to the space left in the old buffer. */
		i = l;
		if (req->oldlen <= req->oldidx)
			i = 0;
		else
			if (i > req->oldlen - req->oldidx)
				i = req->oldlen - req->oldidx;
		if (i > 0)
			bcopy(p, (char *)req->oldptr + req->oldidx, i);
	}
	/*
	 * Always advance oldidx by the full length so the caller can
	 * learn the space required; signal truncation with ENOMEM.
	 */
	req->oldidx += l;
	if (req->oldptr && i != l)
		return (ENOMEM);
	return (0);
}

static int
sysctl_new_kernel(struct sysctl_req *req, void *p, size_t l)
{
	if (!req->newptr)
		return (0);
	if (req->newlen - req->newidx < l)
		return (EINVAL);
	bcopy((char *)req->newptr + req->newidx, p, l);
	req->newidx += l;
	return (0);
}

/*
 * Perform a sysctl from within the kernel: all buffers live in kernel
 * space.  On return "*retval" (if non-NULL) holds the number of bytes
 * written, clamped to the validated old-buffer length.
 */
int
kernel_sysctl(struct thread *td, int *name, u_int namelen, void *old,
    size_t *oldlenp, void *new, size_t newlen, size_t *retval, int flags)
{
	int error = 0;
	struct sysctl_req req;

	bzero(&req, sizeof req);

	req.td = td;
	req.flags = flags;

	if (oldlenp) {
		req.oldlen = *oldlenp;
	}
	req.validlen = req.oldlen;

	if (old) {
		req.oldptr= old;
	}

	if (new != NULL) {
		req.newlen = newlen;
		req.newptr = new;
	}

	req.oldfunc = sysctl_old_kernel;
	req.newfunc = sysctl_new_kernel;
	req.lock = REQ_UNWIRED;

	error = sysctl_root(0, name, namelen, &req);

	/* Release any user memory a handler may have wired. */
	if (req.lock == REQ_WIRED && req.validlen > 0)
		vsunlock(req.oldptr, req.validlen);

	if (error && error != ENOMEM)
		return (error);

	if (retval) {
		if (req.oldptr && req.oldidx > req.validlen)
			*retval = req.validlen;
		else
			*retval = req.oldidx;
	}
	return (error);
}

/*
 * As kernel_sysctl(), but the MIB is given by its string name, which is
 * resolved through the sysctl.name2oid internal sysctl (OID {0, 3}).
 */
int
kernel_sysctlbyname(struct thread *td, char *name, void *old, size_t *oldlenp,
    void *new, size_t newlen, size_t *retval, int flags)
{
	int oid[CTL_MAXNAME];
	size_t oidlen, plen;
	int error;

	oid[0] = 0;		/* sysctl internal magic */
	oid[1] = 3;		/* name2oid */
	oidlen = sizeof(oid);

	error = kernel_sysctl(td, oid, 2, oid, &oidlen,
	    (void *)name, strlen(name), &plen, flags);
	if (error)
		return (error);

	error = kernel_sysctl(td, oid, plen / sizeof(int), old, oldlenp,
	    new, newlen, retval, flags);
	return (error);
}

/*
 * Transfer function to/from user space.
 */
static int
sysctl_old_user(struct sysctl_req *req, const void *p, size_t l)
{
	size_t i, len, origidx;
	int error;

	/* oldidx always advances by the full length (see ENOMEM below). */
	origidx = req->oldidx;
	req->oldidx += l;
	if (req->oldptr == NULL)
		return (0);
	/*
	 * If we have not wired the user supplied buffer and we are currently
	 * holding locks, drop a witness warning, as it's possible that
	 * write operations to the user page can sleep.
	 */
	if (req->lock != REQ_WIRED)
		WITNESS_WARN(WARN_GIANTOK | WARN_SLEEPOK, NULL,
		    "sysctl_old_user()");
	i = l;
	len = req->validlen;
	if (len <= origidx)
		i = 0;
	else {
		/* Clamp the copy to the validated space remaining. */
		if (i > len - origidx)
			i = len - origidx;
		if (req->lock == REQ_WIRED) {
			/* Destination is wired: safe to copy without faulting. */
			error = copyout_nofault(p, (char *)req->oldptr +
			    origidx, i);
		} else
			error = copyout(p, (char *)req->oldptr + origidx, i);
		if (error != 0)
			return (error);
	}
	/* A short copy means the user buffer was too small. */
	if (i < l)
		return (ENOMEM);
	return (0);
}

static int
sysctl_new_user(struct sysctl_req *req, void *p, size_t l)
{
	int error;

	if (!req->newptr)
		return (0);
	if (req->newlen - req->newidx < l)
		return (EINVAL);
	WITNESS_WARN(WARN_GIANTOK | WARN_SLEEPOK, NULL,
	    "sysctl_new_user()");
	error = copyin((char *)req->newptr + req->newidx, p, l);
	req->newidx += l;
	return (error);
}

/*
 * Wire the user space destination buffer.  If set to a value greater than
 * zero, the len parameter limits the maximum amount of wired memory.
 */
int
sysctl_wire_old_buffer(struct sysctl_req *req, size_t len)
{
	int ret;
	size_t wiredlen;

	wiredlen = (len > 0 && len < req->oldlen) ? len : req->oldlen;
	ret = 0;
	if (req->lock != REQ_WIRED && req->oldptr &&
	    req->oldfunc == sysctl_old_user) {
		if (wiredlen != 0) {
			ret = vslock(req->oldptr, wiredlen);
			if (ret != 0) {
				if (ret != ENOMEM)
					return (ret);
				/* Out of wirable memory: degrade to none. */
				wiredlen = 0;
			}
		}
		req->lock = REQ_WIRED;
		req->validlen = wiredlen;
	}
	return (0);
}

/*
 * Look up the oid named by "name"/"namelen" in the MIB tree.  On success
 * "*noid" is the matching oid and "*nindx" (if non-NULL) the number of
 * name components consumed.  A node with its own handler terminates the
 * walk early so the handler can interpret the remaining components.
 * The caller must hold the sysctl lock.
 */
int
sysctl_find_oid(int *name, u_int namelen, struct sysctl_oid **noid,
    int *nindx, struct sysctl_req *req)
{
	struct sysctl_oid_list *lsp;
	struct sysctl_oid *oid;
	int indx;

	SYSCTL_ASSERT_LOCKED();
	lsp = &sysctl__children;
	indx = 0;
	while (indx < CTL_MAXNAME) {
		SLIST_FOREACH(oid, lsp, oid_link) {
			if (oid->oid_number == name[indx])
				break;
		}
		if (oid == NULL)
			return (ENOENT);

		indx++;
		if ((oid->oid_kind & CTLTYPE) == CTLTYPE_NODE) {
			if (oid->oid_handler != NULL || indx == namelen) {
				*noid = oid;
				if (nindx != NULL)
					*nindx = indx;
				KASSERT((oid->oid_kind & CTLFLAG_DYING) == 0,
				    ("%s found DYING node %p", __func__, oid));
				return (0);
			}
			lsp = SYSCTL_CHILDREN(oid);
		} else if (indx == namelen) {
			/* Dormant leaves are invisible to lookups. */
			if ((oid->oid_kind & CTLFLAG_DORMANT) != 0)
				return (ENOENT);
			*noid = oid;
			if (nindx != NULL)
				*nindx = indx;
			KASSERT((oid->oid_kind & CTLFLAG_DYING) == 0,
			    ("%s found DYING node %p", __func__, oid));
			return (0);
		} else {
			/* Name continues past a leaf. */
			return (ENOTDIR);
		}
	}
	return (ENOENT);
}

/*
 * Traverse our tree, and find the right node, execute whatever it points
 * to, and return the resulting error code.
 */

static int
sysctl_root(SYSCTL_HANDLER_ARGS)
{
	struct sysctl_oid *oid;
	struct rm_priotracker tracker;
	int error, indx, lvl;

	SYSCTL_RLOCK(&tracker);

	error = sysctl_find_oid(arg1, arg2, &oid, &indx, req);
	if (error)
		goto out;

	if ((oid->oid_kind & CTLTYPE) == CTLTYPE_NODE) {
		/*
		 * You can't call a sysctl when it's a node, but has
		 * no handler.  Inform the user that it's a node.
		 * The indx may or may not be the same as namelen.
		 */
		if (oid->oid_handler == NULL) {
			error = EISDIR;
			goto out;
		}
	}

	/* Is this sysctl writable? */
	if (req->newptr && !(oid->oid_kind & CTLFLAG_WR)) {
		error = EPERM;
		goto out;
	}

	KASSERT(req->td != NULL, ("sysctl_root(): req->td == NULL"));

#ifdef CAPABILITY_MODE
	/*
	 * If the process is in capability mode, then don't permit reading or
	 * writing unless specifically granted for the node.
	 */
	if (IN_CAPABILITY_MODE(req->td)) {
		if ((req->oldptr && !(oid->oid_kind & CTLFLAG_CAPRD)) ||
		    (req->newptr && !(oid->oid_kind & CTLFLAG_CAPWR))) {
			error = EPERM;
			goto out;
		}
	}
#endif

	/* Is this sysctl sensitive to securelevels? */
	if (req->newptr && (oid->oid_kind & CTLFLAG_SECURE)) {
		lvl = (oid->oid_kind & CTLMASK_SECURE) >> CTLSHIFT_SECURE;
		error = securelevel_gt(req->td->td_ucred, lvl);
		if (error)
			goto out;
	}

	/* Is this sysctl writable by only privileged users? */
	if (req->newptr && !(oid->oid_kind & CTLFLAG_ANYBODY)) {
		int priv;

		if (oid->oid_kind & CTLFLAG_PRISON)
			priv = PRIV_SYSCTL_WRITEJAIL;
#ifdef VIMAGE
		else if ((oid->oid_kind & CTLFLAG_VNET) &&
		    prison_owns_vnet(req->td->td_ucred))
			priv = PRIV_SYSCTL_WRITEJAIL;
#endif
		else
			priv = PRIV_SYSCTL_WRITE;
		error = priv_check(req->td, priv);
		if (error)
			goto out;
	}

	if (!oid->oid_handler) {
		error = EINVAL;
		goto out;
	}

	/*
	 * Node handlers consume the remaining name components themselves;
	 * leaf handlers get their registered arg1/arg2.
	 */
	if ((oid->oid_kind & CTLTYPE) == CTLTYPE_NODE) {
		arg1 = (int *)arg1 + indx;
		arg2 -= indx;
	} else {
		arg1 = oid->oid_arg1;
		arg2 = oid->oid_arg2;
	}
#ifdef MAC
	error = mac_system_check_sysctl(req->td->td_ucred, oid, arg1, arg2,
	    req);
	if (error != 0)
		goto out;
#endif
#ifdef VIMAGE
	/* Relocate virtualized variables into the current vnet's data. */
	if ((oid->oid_kind & CTLFLAG_VNET) && arg1 != NULL)
		arg1 = (void *)(curvnet->vnet_data_base + (uintptr_t)arg1);
#endif
	error = sysctl_root_handler_locked(oid, arg1, arg2, req, &tracker);

out:
	SYSCTL_RUNLOCK(&tracker);
	return (error);
}

#ifndef _SYS_SYSPROTO_H_
struct sysctl_args {
	int	*name;
	u_int	namelen;
	void	*old;
	size_t	*oldlenp;
	void	*new;
	size_t	newlen;
};
#endif
/*
 * __sysctl(2) system call entry point: copy the name vector in, run the
 * request, and copy the resulting old-data length out to *oldlenp.
 */
int
sys___sysctl(struct thread *td, struct sysctl_args *uap)
{
	int error, i, name[CTL_MAXNAME];
	size_t j;

	if (uap->namelen > CTL_MAXNAME || uap->namelen < 2)
		return (EINVAL);

	error = copyin(uap->name, &name, uap->namelen * sizeof(int));
	if (error)
		return (error);

	error = userland_sysctl(td, name, uap->namelen,
	    uap->old, uap->oldlenp, 0,
	    uap->new, uap->newlen, &j, 0);
	if (error && error != ENOMEM)
		return (error);
	/* Report the required/consumed old-buffer size even on ENOMEM. */
	if (uap->oldlenp) {
		i = copyout(&j, uap->oldlenp, sizeof(j));
		if (i)
			return (i);
	}
	return (error);
}

/*
 * This is used
 from various compatibility syscalls too. That's why name
 * must be in kernel space.
 */
int
userland_sysctl(struct thread *td, int *name, u_int namelen, void *old,
    size_t *oldlenp, int inkernel, void *new, size_t newlen, size_t *retval,
    int flags)
{
	int error = 0, memlocked;
	struct sysctl_req req;

	bzero(&req, sizeof req);

	req.td = td;
	req.flags = flags;

	if (oldlenp) {
		if (inkernel) {
			req.oldlen = *oldlenp;
		} else {
			error = copyin(oldlenp, &req.oldlen, sizeof(*oldlenp));
			if (error)
				return (error);
		}
	}
	req.validlen = req.oldlen;

	if (old) {
		if (!useracc(old, req.oldlen, VM_PROT_WRITE))
			return (EFAULT);
		req.oldptr= old;
	}

	if (new != NULL) {
		if (!useracc(new, newlen, VM_PROT_READ))
			return (EFAULT);
		req.newlen = newlen;
		req.newptr = new;
	}

	req.oldfunc = sysctl_old_user;
	req.newfunc = sysctl_new_user;
	req.lock = REQ_UNWIRED;

#ifdef KTRACE
	if (KTRPOINT(curthread, KTR_SYSCTL))
		ktrsysctl(name, namelen);
#endif

	/*
	 * Requests larger than a page are serialized with an exclusive
	 * lock to bound the amount of user memory wired at once.
	 */
	if (req.oldptr && req.oldlen > PAGE_SIZE) {
		memlocked = 1;
		sx_xlock(&sysctlmemlock);
	} else
		memlocked = 0;
	CURVNET_SET(TD_TO_VNET(td));

	/* Handlers may return EAGAIN to request a restart. */
	for (;;) {
		req.oldidx = 0;
		req.newidx = 0;
		error = sysctl_root(0, name, namelen, &req);
		if (error != EAGAIN)
			break;
		kern_yield(PRI_USER);
	}

	CURVNET_RESTORE();

	if (req.lock == REQ_WIRED && req.validlen > 0)
		vsunlock(req.oldptr, req.validlen);
	if (memlocked)
		sx_xunlock(&sysctlmemlock);

	if (error && error != ENOMEM)
		return (error);

	if (retval) {
		if (req.oldptr && req.oldidx > req.validlen)
			*retval = req.validlen;
		else
			*retval = req.oldidx;
	}
	return (error);
}

/*
 * Drain into a sysctl struct.  The user buffer should be wired if a page
 * fault would cause issue.
 */
static int
sbuf_sysctl_drain(void *arg, const char *data, int len)
{
	struct sysctl_req *req = arg;
	int error;

	error = SYSCTL_OUT(req, data, len);
	KASSERT(error >= 0, ("Got unexpected negative value %d", error));
	/* sbuf drain protocol: bytes consumed on success, -error on failure. */
	return (error == 0 ? len : -error);
}

struct sbuf *
sbuf_new_for_sysctl(struct sbuf *s, char *buf, int length,
    struct sysctl_req *req)
{

	/* Supply a default buffer size if none given. */
	if (buf == NULL && length == 0)
		length = 64;
	/* SBUF_INCLUDENUL: the terminating NUL is counted and drained too. */
	s = sbuf_new(s, buf, length, SBUF_FIXEDLEN | SBUF_INCLUDENUL);
	sbuf_set_drain(s, sbuf_sysctl_drain, req);
	return (s);
}