mac_framework.c revision 165411
1/*- 2 * Copyright (c) 1999-2002 Robert N. M. Watson 3 * Copyright (c) 2001 Ilmar S. Habibulin 4 * Copyright (c) 2001-2005 Networks Associates Technology, Inc. 5 * Copyright (c) 2005-2006 SPARTA, Inc. 6 * All rights reserved. 7 * 8 * This software was developed by Robert Watson and Ilmar Habibulin for the 9 * TrustedBSD Project. 10 * 11 * This software was developed for the FreeBSD Project in part by Network 12 * Associates Laboratories, the Security Research Division of Network 13 * Associates, Inc. under DARPA/SPAWAR contract N66001-01-C-8035 ("CBOSS"), 14 * as part of the DARPA CHATS research program. 15 * 16 * This software was enhanced by SPARTA ISSO under SPAWAR contract 17 * N66001-04-C-6019 ("SEFOS"). 18 * 19 * Redistribution and use in source and binary forms, with or without 20 * modification, are permitted provided that the following conditions 21 * are met: 22 * 1. Redistributions of source code must retain the above copyright 23 * notice, this list of conditions and the following disclaimer. 24 * 2. Redistributions in binary form must reproduce the above copyright 25 * notice, this list of conditions and the following disclaimer in the 26 * documentation and/or other materials provided with the distribution. 27 * 28 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND 29 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 30 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 31 * ARE DISCLAIMED. 
IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE 32 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 33 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS 34 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 35 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT 36 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY 37 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 38 * SUCH DAMAGE. 39 */ 40 41/*- 42 * Framework for extensible kernel access control. This file contains 43 * Kernel and userland interface to the framework, policy registration 44 * and composition. Per-object interfaces, controls, and labeling may be 45 * found in src/sys/security/mac/. Sample policies may be found in 46 * src/sys/security/mac_*. 47 */ 48 49#include <sys/cdefs.h> 50__FBSDID("$FreeBSD: head/sys/security/mac/mac_framework.c 165411 2006-12-20 20:38:44Z rwatson $"); 51 52#include "opt_mac.h" 53 54#include <sys/param.h> 55#include <sys/condvar.h> 56#include <sys/extattr.h> 57#include <sys/imgact.h> 58#include <sys/kernel.h> 59#include <sys/lock.h> 60#include <sys/malloc.h> 61#include <sys/mutex.h> 62#include <sys/mac.h> 63#include <sys/module.h> 64#include <sys/proc.h> 65#include <sys/sbuf.h> 66#include <sys/systm.h> 67#include <sys/sysproto.h> 68#include <sys/sysent.h> 69#include <sys/vnode.h> 70#include <sys/mount.h> 71#include <sys/file.h> 72#include <sys/namei.h> 73#include <sys/socket.h> 74#include <sys/pipe.h> 75#include <sys/socketvar.h> 76#include <sys/sysctl.h> 77 78#include <vm/vm.h> 79#include <vm/pmap.h> 80#include <vm/vm_map.h> 81#include <vm/vm_object.h> 82 83#include <sys/mac_policy.h> 84 85#include <fs/devfs/devfs.h> 86 87#include <net/bpfdesc.h> 88#include <net/if.h> 89#include <net/if_var.h> 90 91#include <netinet/in.h> 92#include <netinet/ip_var.h> 93 94#include <security/mac/mac_framework.h> 95#include 
<security/mac/mac_internal.h>

#ifdef MAC

/*
 * Declare that the kernel provides MAC support, version 3 (matching the
 * MODULE_VERSION below).  This permits modules to refuse to be loaded if
 * the necessary support isn't present, even if it's pre-boot.
 */
MODULE_VERSION(kernel_mac_support, 3);

SYSCTL_NODE(_security, OID_AUTO, mac, CTLFLAG_RW, 0,
    "TrustedBSD MAC policy controls");

/*
 * Labels consist of an indexed set of "slots", which are allocated to
 * policies as required.  The MAC Framework maintains a bitmask of slots
 * allocated so far to prevent reuse.  Slots cannot be reused, as the MAC
 * Framework guarantees that newly allocated slots in labels will be NULL
 * unless otherwise initialized, and because we do not have a mechanism to
 * garbage collect slots on policy unload.  As labeled policies tend to be
 * statically loaded during boot, and not frequently unloaded and reloaded,
 * this is not generally an issue.
 */
#if MAC_MAX_SLOTS > 32
#error "MAC_MAX_SLOTS too large"
#endif

static unsigned int mac_max_slots = MAC_MAX_SLOTS;
/* Bitmask of still-available slots; bit n clear means slot n is taken. */
static unsigned int mac_slot_offsets_free = (1 << MAC_MAX_SLOTS) - 1;
SYSCTL_UINT(_security_mac, OID_AUTO, max_slots, CTLFLAG_RD,
    &mac_max_slots, 0, "");

/*
 * Has the kernel started generating labeled objects yet?  All read/write
 * access to this variable is serialized during the boot process.  Following
 * the end of serialization, we don't update this flag; no locking.
 */
int mac_late = 0;

/*
 * Flag to indicate whether or not we should allocate label storage for new
 * mbufs.  Since most dynamic policies we currently work with don't rely on
 * mbuf labeling, try to avoid paying the cost of mtag allocation unless
 * specifically notified of interest.  One result of this is that if a
 * dynamically loaded policy requests mbuf labels, it must be able to deal
 * with a NULL label being returned on any mbufs that were already in flight
 * when the policy was loaded.  Since the policy already has to deal with
 * uninitialized labels, this probably won't be a problem.  Note: currently
 * no locking.  Will this be a problem?
 *
 * In the future, we may want to allow objects to request labeling on a per-
 * object type basis, rather than globally for all objects.
 */
#ifndef MAC_ALWAYS_LABEL_MBUF
int mac_labelmbufs = 0;
#endif

static int mac_policy_register(struct mac_policy_conf *mpc);
static int mac_policy_unregister(struct mac_policy_conf *mpc);

MALLOC_DEFINE(M_MACTEMP, "mactemp", "MAC temporary label storage");

/*
 * mac_static_policy_list holds a list of policy modules that are not loaded
 * while the system is "live", and cannot be unloaded.  These policies can be
 * invoked without holding the busy count.
 *
 * mac_policy_list stores the list of dynamic policies.  A busy count is
 * maintained for the list, stored in mac_policy_count.  The busy count is
 * protected by mac_policy_mtx; the list may be modified only while the busy
 * count is 0, requiring that the lock be held to prevent new references to
 * the list from being acquired.  For almost all operations, incrementing the
 * busy count is sufficient to guarantee consistency, as the list cannot be
 * modified while the busy count is elevated.  For a few special operations
 * involving a change to the list of active policies, the mtx itself must be
 * held.  A condition variable, mac_policy_cv, is used to signal potential
 * exclusive consumers that they should try to acquire the lock if a first
 * attempt at exclusive access fails.
 *
 * This design intentionally avoids fairness, and may starve attempts to
 * acquire an exclusive lock on a busy system.  This is required because we
 * do not ever want acquiring a read reference to perform an unbounded length
 * sleep.  Read references are acquired in ithreads, network isrs, etc, and
 * any unbounded blocking could lead quickly to deadlock.
 *
 * Another reason for never blocking on read references is that the MAC
 * Framework may recurse: if a policy calls a VOP, for example, this might
 * lead to vnode life cycle operations (such as init/destroy).
 */
#ifndef MAC_STATIC
static struct mtx mac_policy_mtx;
static struct cv mac_policy_cv;
static int mac_policy_count;
#endif
struct mac_policy_list_head mac_policy_list;
struct mac_policy_list_head mac_static_policy_list;

/*
 * We manually invoke WITNESS_WARN() to allow Witness to generate warnings
 * even if we don't end up ever triggering the wait at run-time.  The
 * consumer of the exclusive interface must not hold any locks (other than
 * potentially Giant) since we may sleep for long (potentially indefinite)
 * periods of time waiting for the framework to become quiescent so that a
 * policy list change may be made.
 */
void
mac_policy_grab_exclusive(void)
{

#ifndef MAC_STATIC
	/* Before mac_late, boot is serialized; no locking is required. */
	if (!mac_late)
		return;

	WITNESS_WARN(WARN_GIANTOK | WARN_SLEEPOK, NULL,
	    "mac_policy_grab_exclusive() at %s:%d", __FILE__, __LINE__);
	mtx_lock(&mac_policy_mtx);
	/* Sleep until all read (busy) references have drained. */
	while (mac_policy_count != 0)
		cv_wait(&mac_policy_cv, &mac_policy_mtx);
#endif
}

/*
 * Assert that the caller holds exclusive access to the policy lists: the
 * mutex is owned and no read (busy) references are outstanding.
 */
void
mac_policy_assert_exclusive(void)
{

#ifndef MAC_STATIC
	if (!mac_late)
		return;

	mtx_assert(&mac_policy_mtx, MA_OWNED);
	KASSERT(mac_policy_count == 0,
	    ("mac_policy_assert_exclusive(): not exclusive"));
#endif
}

/*
 * Release exclusive access, waking one thread waiting on the condition
 * variable so it can retry.
 */
void
mac_policy_release_exclusive(void)
{

#ifndef MAC_STATIC
	if (!mac_late)
		return;

	KASSERT(mac_policy_count == 0,
	    ("mac_policy_release_exclusive(): not exclusive"));
	mtx_unlock(&mac_policy_mtx);
	cv_signal(&mac_policy_cv);
#endif
}

/*
 * Unconditionally acquire a read (busy) reference on the dynamic policy
 * list, blocking any exclusive (list-modifying) access while held.
 */
void
mac_policy_list_busy(void)
{

#ifndef MAC_STATIC
	if (!mac_late)
		return;

	mtx_lock(&mac_policy_mtx);
	mac_policy_count++;
	mtx_unlock(&mac_policy_mtx);
#endif
}

/*
 * Acquire a read (busy) reference only if the dynamic policy list is
 * non-empty.  Returns non-zero when a reference was taken, or when no
 * reference is needed (before mac_late, or with MAC_STATIC).
 */
int
mac_policy_list_conditional_busy(void)
{
#ifndef MAC_STATIC
	int ret;

	if (!mac_late)
		return (1);

	mtx_lock(&mac_policy_mtx);
	if (!LIST_EMPTY(&mac_policy_list)) {
		mac_policy_count++;
		ret = 1;
	} else
		ret = 0;
	mtx_unlock(&mac_policy_mtx);
	return (ret);
#else
	/*
	 * NOTE(review): with MAC_STATIC there is no busy count; both paths
	 * below return 1, making the mac_late test redundant here.
	 */
	if (!mac_late)
		return (1);

	return (1);
#endif
}

/*
 * Drop a read (busy) reference; the last reference out signals any thread
 * waiting for exclusive access.
 */
void
mac_policy_list_unbusy(void)
{

#ifndef MAC_STATIC
	if (!mac_late)
		return;

	mtx_lock(&mac_policy_mtx);
	mac_policy_count--;
	KASSERT(mac_policy_count >= 0, ("MAC_POLICY_LIST_LOCK"));
	if (mac_policy_count == 0)
		cv_signal(&mac_policy_cv);
	mtx_unlock(&mac_policy_mtx);
#endif
}

/*
 * Initialize the MAC subsystem, including appropriate SMP locks.
304 */ 305static void 306mac_init(void) 307{ 308 309 LIST_INIT(&mac_static_policy_list); 310 LIST_INIT(&mac_policy_list); 311 mac_labelzone_init(); 312 313#ifndef MAC_STATIC 314 mtx_init(&mac_policy_mtx, "mac_policy_mtx", NULL, MTX_DEF); 315 cv_init(&mac_policy_cv, "mac_policy_cv"); 316#endif 317} 318 319/* 320 * For the purposes of modules that want to know if they were loaded "early", 321 * set the mac_late flag once we've processed modules either linked into the 322 * kernel, or loaded before the kernel startup. 323 */ 324static void 325mac_late_init(void) 326{ 327 328 mac_late = 1; 329} 330 331/* 332 * After the policy list has changed, walk the list to update any global 333 * flags. Currently, we support only one flag, and it's conditionally 334 * defined; as a result, the entire function is conditional. Eventually, the 335 * #else case might also iterate across the policies. 336 */ 337static void 338mac_policy_updateflags(void) 339{ 340#ifndef MAC_ALWAYS_LABEL_MBUF 341 struct mac_policy_conf *tmpc; 342 int labelmbufs; 343 344 mac_policy_assert_exclusive(); 345 346 labelmbufs = 0; 347 LIST_FOREACH(tmpc, &mac_static_policy_list, mpc_list) { 348 if (tmpc->mpc_loadtime_flags & MPC_LOADTIME_FLAG_LABELMBUFS) 349 labelmbufs++; 350 } 351 LIST_FOREACH(tmpc, &mac_policy_list, mpc_list) { 352 if (tmpc->mpc_loadtime_flags & MPC_LOADTIME_FLAG_LABELMBUFS) 353 labelmbufs++; 354 } 355 mac_labelmbufs = (labelmbufs != 0); 356#endif 357} 358 359/* 360 * Allow MAC policy modules to register during boot, etc. 
 */
int
mac_policy_modevent(module_t mod, int type, void *data)
{
	struct mac_policy_conf *mpc;
	int error;

	error = 0;
	mpc = (struct mac_policy_conf *) data;

#ifdef MAC_STATIC
	/* With MAC_STATIC, no policy may be loaded or unloaded after boot. */
	if (mac_late) {
		printf("mac_policy_modevent: MAC_STATIC and late\n");
		return (EBUSY);
	}
#endif

	switch (type) {
	case MOD_LOAD:
		if (mpc->mpc_loadtime_flags & MPC_LOADTIME_FLAG_NOTLATE &&
		    mac_late) {
			printf("mac_policy_modevent: can't load %s policy "
			    "after booting\n", mpc->mpc_name);
			error = EBUSY;
			break;
		}
		error = mac_policy_register(mpc);
		break;
	case MOD_UNLOAD:
		/* Don't unregister the module if it was never registered. */
		if ((mpc->mpc_runtime_flags & MPC_RUNTIME_FLAG_REGISTERED)
		    != 0)
			error = mac_policy_unregister(mpc);
		else
			error = 0;
		break;
	default:
		error = EOPNOTSUPP;
		break;
	}

	return (error);
}

/*
 * Register a policy: hook it onto the appropriate (static or dynamic)
 * policy list, allocate a label slot if the policy requests per-object
 * state, and invoke the policy's init method.
 */
static int
mac_policy_register(struct mac_policy_conf *mpc)
{
	struct mac_policy_conf *tmpc;
	int error, slot, static_entry;

	error = 0;

	/*
	 * We don't technically need exclusive access while !mac_late, but
	 * hold it for assertion consistency.
	 */
	mac_policy_grab_exclusive();

	/*
	 * If the module can potentially be unloaded, or we're loading late,
	 * we have to stick it in the non-static list and pay an extra
	 * performance overhead.  Otherwise, we can pay a light locking cost
	 * and stick it in the static list.
	 */
	static_entry = (!mac_late &&
	    !(mpc->mpc_loadtime_flags & MPC_LOADTIME_FLAG_UNLOADOK));

	/* Policy names must be unique within the target list. */
	if (static_entry) {
		LIST_FOREACH(tmpc, &mac_static_policy_list, mpc_list) {
			if (strcmp(tmpc->mpc_name, mpc->mpc_name) == 0) {
				error = EEXIST;
				goto out;
			}
		}
	} else {
		LIST_FOREACH(tmpc, &mac_policy_list, mpc_list) {
			if (strcmp(tmpc->mpc_name, mpc->mpc_name) == 0) {
				error = EEXIST;
				goto out;
			}
		}
	}
	if (mpc->mpc_field_off != NULL) {
		/* Grab the lowest free label slot; slots are never reused. */
		slot = ffs(mac_slot_offsets_free);
		if (slot == 0) {
			error = ENOMEM;
			goto out;
		}
		slot--;
		mac_slot_offsets_free &= ~(1 << slot);
		*mpc->mpc_field_off = slot;
	}
	mpc->mpc_runtime_flags |= MPC_RUNTIME_FLAG_REGISTERED;

	/*
	 * If we're loading a MAC module after the framework has initialized,
	 * it has to go into the dynamic list.  If we're loading it before
	 * we've finished initializing, it can go into the static list with
	 * weaker locking requirements.
	 */
	if (static_entry)
		LIST_INSERT_HEAD(&mac_static_policy_list, mpc, mpc_list);
	else
		LIST_INSERT_HEAD(&mac_policy_list, mpc, mpc_list);

	/*
	 * Per-policy initialization.  Currently, this takes place under the
	 * exclusive lock, so policies must not sleep in their init method.
	 * In the future, we may want to separate "init" from "start", with
	 * "init" occurring without the lock held.  Likewise, on tear-down,
	 * breaking out "stop" from "destroy".
	 */
	if (mpc->mpc_ops->mpo_init != NULL)
		(*(mpc->mpc_ops->mpo_init))(mpc);
	mac_policy_updateflags();

	printf("Security policy loaded: %s (%s)\n", mpc->mpc_fullname,
	    mpc->mpc_name);

out:
	mac_policy_release_exclusive();
	return (error);
}

/*
 * Unregister a policy: detach it from its list and invoke its destroy
 * method.  Fails with EBUSY unless the policy declared itself unloadable.
 */
static int
mac_policy_unregister(struct mac_policy_conf *mpc)
{

	/*
	 * If we fail the load, we may get a request to unload.
	 * Check to see if we did the run-time registration, and if not,
	 * silently succeed.
	 */
	mac_policy_grab_exclusive();
	if ((mpc->mpc_runtime_flags & MPC_RUNTIME_FLAG_REGISTERED) == 0) {
		mac_policy_release_exclusive();
		return (0);
	}
#if 0
	/*
	 * Don't allow unloading modules with private data.
	 */
	if (mpc->mpc_field_off != NULL) {
		MAC_POLICY_LIST_UNLOCK();
		return (EBUSY);
	}
#endif
	/*
	 * Only allow the unload to proceed if the module is unloadable by
	 * its own definition.
	 */
	if ((mpc->mpc_loadtime_flags & MPC_LOADTIME_FLAG_UNLOADOK) == 0) {
		mac_policy_release_exclusive();
		return (EBUSY);
	}
	if (mpc->mpc_ops->mpo_destroy != NULL)
		(*(mpc->mpc_ops->mpo_destroy))(mpc);

	LIST_REMOVE(mpc, mpc_list);
	mpc->mpc_runtime_flags &= ~MPC_RUNTIME_FLAG_REGISTERED;
	mac_policy_updateflags();

	mac_policy_release_exclusive();

	printf("Security policy unload: %s (%s)\n", mpc->mpc_fullname,
	    mpc->mpc_name);

	return (0);
}

/*
 * Define an error value precedence, and given two arguments, select the
 * value with the higher precedence.
 */
int
mac_error_select(int error1, int error2)
{

	/* Certain decision-making errors take top priority. */
	if (error1 == EDEADLK || error2 == EDEADLK)
		return (EDEADLK);

	/* Invalid arguments should be reported where possible. */
	if (error1 == EINVAL || error2 == EINVAL)
		return (EINVAL);

	/* Precedence goes to "visibility", with both process and file. */
	if (error1 == ESRCH || error2 == ESRCH)
		return (ESRCH);

	if (error1 == ENOENT || error2 == ENOENT)
		return (ENOENT);

	/* Precedence goes to DAC/MAC protections. */
	if (error1 == EACCES || error2 == EACCES)
		return (EACCES);

	/* Precedence goes to privilege. */
	if (error1 == EPERM || error2 == EPERM)
		return (EPERM);

	/* Precedence goes to error over success; otherwise, arbitrary. */
	if (error1 != 0)
		return (error1);
	return (error2);
}

/*
 * Place a label in the well-defined initialized (empty) state.
 */
void
mac_init_label(struct label *label)
{

	bzero(label, sizeof(*label));
	label->l_flags = MAC_FLAG_INITIALIZED;
}

/*
 * Tear down a label previously set up with mac_init_label().
 */
void
mac_destroy_label(struct label *label)
{

	KASSERT(label->l_flags & MAC_FLAG_INITIALIZED,
	    ("destroying uninitialized label"));

	bzero(label, sizeof(*label));
	/* implicit: label->l_flags &= ~MAC_FLAG_INITIALIZED; */
}

/*
 * Sanity-check a struct mac copied in from userspace: the label buffer
 * length must be non-negative and no larger than MAC_MAX_LABEL_BUF_LEN.
 */
int
mac_check_structmac_consistent(struct mac *mac)
{

	if (mac->m_buflen < 0 ||
	    mac->m_buflen > MAC_MAX_LABEL_BUF_LEN)
		return (EINVAL);

	return (0);
}

/*
 * MPSAFE
 *
 * __mac_get_pid(2): externalize the credential label of another process,
 * subject to a p_cansee() visibility check.
 */
int
__mac_get_pid(struct thread *td, struct __mac_get_pid_args *uap)
{
	char *elements, *buffer;
	struct mac mac;
	struct proc *tproc;
	struct ucred *tcred;
	int error;

	error = copyin(uap->mac_p, &mac, sizeof(mac));
	if (error)
		return (error);

	error = mac_check_structmac_consistent(&mac);
	if (error)
		return (error);

	/* pfind() returns the process locked on success. */
	tproc = pfind(uap->pid);
	if (tproc == NULL)
		return (ESRCH);

	tcred = NULL;	/* Satisfy gcc.
 */
	error = p_cansee(td, tproc);
	if (error == 0)
		tcred = crhold(tproc->p_ucred);
	PROC_UNLOCK(tproc);
	if (error)
		return (error);

	elements = malloc(mac.m_buflen, M_MACTEMP, M_WAITOK);
	error = copyinstr(mac.m_string, elements, mac.m_buflen, NULL);
	if (error) {
		free(elements, M_MACTEMP);
		crfree(tcred);
		return (error);
	}

	buffer = malloc(mac.m_buflen, M_MACTEMP, M_WAITOK | M_ZERO);
	error = mac_externalize_cred_label(tcred->cr_label, elements,
	    buffer, mac.m_buflen);
	if (error == 0)
		error = copyout(buffer, mac.m_string, strlen(buffer)+1);

	free(buffer, M_MACTEMP);
	free(elements, M_MACTEMP);
	crfree(tcred);
	return (error);
}

/*
 * MPSAFE
 *
 * __mac_get_proc(2): externalize the calling thread's own credential
 * label.
 */
int
__mac_get_proc(struct thread *td, struct __mac_get_proc_args *uap)
{
	char *elements, *buffer;
	struct mac mac;
	int error;

	error = copyin(uap->mac_p, &mac, sizeof(mac));
	if (error)
		return (error);

	error = mac_check_structmac_consistent(&mac);
	if (error)
		return (error);

	elements = malloc(mac.m_buflen, M_MACTEMP, M_WAITOK);
	error = copyinstr(mac.m_string, elements, mac.m_buflen, NULL);
	if (error) {
		free(elements, M_MACTEMP);
		return (error);
	}

	buffer = malloc(mac.m_buflen, M_MACTEMP, M_WAITOK | M_ZERO);
	error = mac_externalize_cred_label(td->td_ucred->cr_label,
	    elements, buffer, mac.m_buflen);
	if (error == 0)
		error = copyout(buffer, mac.m_string, strlen(buffer)+1);

	free(buffer, M_MACTEMP);
	free(elements, M_MACTEMP);
	return (error);
}

/*
 * MPSAFE
 *
 * __mac_set_proc(2): relabel the calling process's credential, then
 * revoke access to any mmapped objects the new label no longer permits.
 */
int
__mac_set_proc(struct thread *td, struct __mac_set_proc_args *uap)
{
	struct ucred *newcred, *oldcred;
	struct label *intlabel;
	struct proc *p;
	struct mac mac;
	char *buffer;
	int error;

	error = copyin(uap->mac_p, &mac, sizeof(mac));
	if (error)
		return (error);

	error = mac_check_structmac_consistent(&mac);
	if (error)
		return (error);

	buffer = malloc(mac.m_buflen, M_MACTEMP, M_WAITOK);
	error = copyinstr(mac.m_string, buffer, mac.m_buflen, NULL);
	if (error) {
		free(buffer, M_MACTEMP);
		return (error);
	}

	intlabel = mac_cred_label_alloc();
	error = mac_internalize_cred_label(intlabel, buffer);
	free(buffer, M_MACTEMP);
	if (error)
		goto out;

	newcred = crget();

	p = td->td_proc;
	PROC_LOCK(p);
	oldcred = p->p_ucred;

	error = mac_check_cred_relabel(oldcred, intlabel);
	if (error) {
		PROC_UNLOCK(p);
		crfree(newcred);
		goto out;
	}

	setsugid(p);
	crcopy(newcred, oldcred);
	mac_relabel_cred(newcred, intlabel);
	p->p_ucred = newcred;

	/*
	 * Grab additional reference for use while revoking mmaps, prior to
	 * releasing the proc lock and sharing the cred.
	 */
	crhold(newcred);
	PROC_UNLOCK(p);

	if (mac_enforce_vm) {
		mac_cred_mmapped_drop_perms(td, newcred);
	}

	crfree(newcred);	/* Free revocation reference. */
	crfree(oldcred);

out:
	mac_cred_label_free(intlabel);
	return (error);
}

/*
 * MPSAFE
 *
 * __mac_get_fd(2): externalize the label of the object referenced by a
 * file descriptor (vnode, pipe, or socket).
 */
int
__mac_get_fd(struct thread *td, struct __mac_get_fd_args *uap)
{
	char *elements, *buffer;
	struct label *intlabel;
	struct file *fp;
	struct mac mac;
	struct vnode *vp;
	struct pipe *pipe;
	struct socket *so;
	short label_type;
	int vfslocked, error;

	error = copyin(uap->mac_p, &mac, sizeof(mac));
	if (error)
		return (error);

	error = mac_check_structmac_consistent(&mac);
	if (error)
		return (error);

	elements = malloc(mac.m_buflen, M_MACTEMP, M_WAITOK);
	error = copyinstr(mac.m_string, elements, mac.m_buflen, NULL);
	if (error) {
		free(elements, M_MACTEMP);
		return (error);
	}

	buffer = malloc(mac.m_buflen, M_MACTEMP, M_WAITOK | M_ZERO);
	error = fget(td, uap->fd, &fp);
	if (error)
		goto out;

	/* NOTE(review): label_type is recorded but not consulted below. */
	label_type = fp->f_type;
	switch (fp->f_type) {
	case DTYPE_FIFO:
	case DTYPE_VNODE:
		/* Snapshot the vnode label under the vnode lock. */
		vp = fp->f_vnode;
		intlabel = mac_vnode_label_alloc();
		vfslocked = VFS_LOCK_GIANT(vp->v_mount);
		vn_lock(vp, LK_EXCLUSIVE | LK_RETRY, td);
		mac_copy_vnode_label(vp->v_label, intlabel);
		VOP_UNLOCK(vp, 0, td);
		VFS_UNLOCK_GIANT(vfslocked);
		error = mac_externalize_vnode_label(intlabel, elements,
		    buffer, mac.m_buflen);
		mac_vnode_label_free(intlabel);
		break;

	case DTYPE_PIPE:
		/* Snapshot the pipe-pair label under the pipe lock. */
		pipe = fp->f_data;
		intlabel = mac_pipe_label_alloc();
		PIPE_LOCK(pipe);
		mac_copy_pipe_label(pipe->pipe_pair->pp_label, intlabel);
		PIPE_UNLOCK(pipe);
		error = mac_externalize_pipe_label(intlabel, elements,
		    buffer, mac.m_buflen);
		mac_pipe_label_free(intlabel);
		break;

	case DTYPE_SOCKET:
		/* Snapshot the socket label under the socket lock. */
		so = fp->f_data;
		intlabel = mac_socket_label_alloc(M_WAITOK);
		NET_LOCK_GIANT();
		SOCK_LOCK(so);
		mac_copy_socket_label(so->so_label, intlabel);
		SOCK_UNLOCK(so);
		NET_UNLOCK_GIANT();
		error = mac_externalize_socket_label(intlabel, elements,
		    buffer, mac.m_buflen);
		mac_socket_label_free(intlabel);
		break;

	default:
		error = EINVAL;
	}
	fdrop(fp, td);
	if (error == 0)
		error = copyout(buffer, mac.m_string, strlen(buffer)+1);

out:
	free(buffer, M_MACTEMP);
	free(elements, M_MACTEMP);
	return (error);
}

/*
 * MPSAFE
 *
 * __mac_get_file(2): externalize the label of the vnode named by a path,
 * following symlinks.
 */
int
__mac_get_file(struct thread *td, struct __mac_get_file_args *uap)
{
	char *elements, *buffer;
	struct nameidata nd;
	struct label *intlabel;
	struct mac mac;
	int vfslocked, error;

	error = copyin(uap->mac_p, &mac, sizeof(mac));
	if (error)
		return (error);

	error = mac_check_structmac_consistent(&mac);
	if (error)
		return (error);

	elements = malloc(mac.m_buflen, M_MACTEMP, M_WAITOK);
	error = copyinstr(mac.m_string, elements, mac.m_buflen, NULL);
	if (error) {
		free(elements, M_MACTEMP);
		return (error);
	}

	buffer = malloc(mac.m_buflen, M_MACTEMP, M_WAITOK | M_ZERO);
	NDINIT(&nd, LOOKUP, MPSAFE | LOCKLEAF | FOLLOW, UIO_USERSPACE,
	    uap->path_p, td);
	error = namei(&nd);
	if (error)
		goto out;

	intlabel = mac_vnode_label_alloc();
	vfslocked = NDHASGIANT(&nd);
	mac_copy_vnode_label(nd.ni_vp->v_label, intlabel);
	error = mac_externalize_vnode_label(intlabel, elements, buffer,
	    mac.m_buflen);

	NDFREE(&nd, 0);
	VFS_UNLOCK_GIANT(vfslocked);
	mac_vnode_label_free(intlabel);
	if (error == 0)
		error = copyout(buffer, mac.m_string, strlen(buffer)+1);

out:
	free(buffer, M_MACTEMP);
	free(elements, M_MACTEMP);

	return (error);
}

/*
 * MPSAFE
 *
 * __mac_get_link(2): as __mac_get_file(2), but does not follow a
 * terminal symlink (NOFOLLOW lookup).
 */
int
__mac_get_link(struct thread *td, struct __mac_get_link_args *uap)
{
	char *elements, *buffer;
	struct nameidata nd;
	struct label *intlabel;
	struct mac mac;
	int vfslocked, error;

	error = copyin(uap->mac_p, &mac, sizeof(mac));
	if (error)
		return (error);

	error = mac_check_structmac_consistent(&mac);
	if (error)
		return (error);

	elements = malloc(mac.m_buflen, M_MACTEMP, M_WAITOK);
	error = copyinstr(mac.m_string, elements, mac.m_buflen, NULL);
	if (error) {
		free(elements, M_MACTEMP);
		return (error);
	}

	buffer = malloc(mac.m_buflen, M_MACTEMP, M_WAITOK | M_ZERO);
	NDINIT(&nd, LOOKUP, MPSAFE | LOCKLEAF | NOFOLLOW, UIO_USERSPACE,
	    uap->path_p, td);
	error = namei(&nd);
	if (error)
		goto out;

	intlabel = mac_vnode_label_alloc();
	vfslocked = NDHASGIANT(&nd);
	mac_copy_vnode_label(nd.ni_vp->v_label, intlabel);
	error = mac_externalize_vnode_label(intlabel, elements, buffer,
	    mac.m_buflen);
	NDFREE(&nd, 0);
	VFS_UNLOCK_GIANT(vfslocked);
	mac_vnode_label_free(intlabel);

	if (error == 0)
		error = copyout(buffer, mac.m_string, strlen(buffer)+1);

out:
	free(buffer, M_MACTEMP);
	free(elements, M_MACTEMP);

	return (error);
}

/*
 * MPSAFE
 *
 * __mac_set_fd(2): internalize a user-supplied label and apply it to the
 * object referenced by a file descriptor (vnode, pipe, or socket).
 */
int
__mac_set_fd(struct thread *td, struct __mac_set_fd_args *uap)
{
	struct label *intlabel;
	struct pipe *pipe;
	struct socket *so;
	struct file *fp;
	struct mount *mp;
	struct vnode *vp;
	struct mac mac;
	char *buffer;
	int error, vfslocked;

	error = copyin(uap->mac_p, &mac, sizeof(mac));
	if (error)
		return (error);

	error = mac_check_structmac_consistent(&mac);
	if (error)
		return (error);

	buffer = malloc(mac.m_buflen, M_MACTEMP, M_WAITOK);
	error = copyinstr(mac.m_string, buffer, mac.m_buflen, NULL);
	if (error) {
		free(buffer, M_MACTEMP);
		return (error);
	}

	error = fget(td, uap->fd, &fp);
	if (error)
		goto out;

	switch (fp->f_type) {
	case DTYPE_FIFO:
	case DTYPE_VNODE:
		intlabel = mac_vnode_label_alloc();
		error = mac_internalize_vnode_label(intlabel, buffer);
		if (error) {
			mac_vnode_label_free(intlabel);
			break;
		}
		/* Relabel under the vnode lock, with the fs write-suspended. */
		vp = fp->f_vnode;
		vfslocked = VFS_LOCK_GIANT(vp->v_mount);
		error = vn_start_write(vp, &mp, V_WAIT | PCATCH);
		if (error != 0) {
			VFS_UNLOCK_GIANT(vfslocked);
			mac_vnode_label_free(intlabel);
			break;
		}
		vn_lock(vp, LK_EXCLUSIVE | LK_RETRY, td);
		error = vn_setlabel(vp, intlabel, td->td_ucred);
		VOP_UNLOCK(vp, 0, td);
		vn_finished_write(mp);
		VFS_UNLOCK_GIANT(vfslocked);
		mac_vnode_label_free(intlabel);
		break;

	case DTYPE_PIPE:
		intlabel = mac_pipe_label_alloc();
		error = mac_internalize_pipe_label(intlabel, buffer);
		if (error == 0) {
			pipe = fp->f_data;
			PIPE_LOCK(pipe);
			error = mac_pipe_label_set(td->td_ucred,
			    pipe->pipe_pair, intlabel);
			PIPE_UNLOCK(pipe);
		}
		mac_pipe_label_free(intlabel);
		break;

	case DTYPE_SOCKET:
		intlabel = mac_socket_label_alloc(M_WAITOK);
		error = mac_internalize_socket_label(intlabel, buffer);
		if (error == 0) {
			so = fp->f_data;
			NET_LOCK_GIANT();
			error = mac_socket_label_set(td->td_ucred, so,
			    intlabel);
			NET_UNLOCK_GIANT();
		}
		mac_socket_label_free(intlabel);
		break;

	default:
		error = EINVAL;
	}
	fdrop(fp, td);
out:
	free(buffer, M_MACTEMP);
	return (error);
}

/*
 * MPSAFE
 *
 * __mac_set_file(2): internalize a user-supplied label and apply it to
 * the vnode named by a path, following symlinks.
 */
int
__mac_set_file(struct thread *td, struct __mac_set_file_args *uap)
{
	struct label *intlabel;
	struct nameidata nd;
	struct mount *mp;
	struct mac mac;
	char *buffer;
	int vfslocked, error;

	error = copyin(uap->mac_p, &mac, sizeof(mac));
	if (error)
		return (error);

	error = mac_check_structmac_consistent(&mac);
	if (error)
		return (error);

	buffer = malloc(mac.m_buflen, M_MACTEMP, M_WAITOK);
	error = copyinstr(mac.m_string, buffer, mac.m_buflen, NULL);
	if (error) {
		free(buffer, M_MACTEMP);
		return (error);
	}

	intlabel = mac_vnode_label_alloc();
	error = mac_internalize_vnode_label(intlabel, buffer);
	free(buffer, M_MACTEMP);
	if (error)
		goto out;

	NDINIT(&nd, LOOKUP, MPSAFE | LOCKLEAF | FOLLOW, UIO_USERSPACE,
	    uap->path_p, td);
	error = namei(&nd);
	vfslocked = NDHASGIANT(&nd);
	if (error == 0) {
		error = vn_start_write(nd.ni_vp, &mp, V_WAIT | PCATCH);
		if (error == 0) {
			error = vn_setlabel(nd.ni_vp, intlabel,
			    td->td_ucred);
			vn_finished_write(mp);
		}
	}

	NDFREE(&nd, 0);
	VFS_UNLOCK_GIANT(vfslocked);
out:
	mac_vnode_label_free(intlabel);
	return (error);
}

/*
 * MPSAFE
 *
 * __mac_set_link(2): as __mac_set_file(2), but does not follow a
 * terminal symlink (NOFOLLOW lookup).
 */
int
__mac_set_link(struct thread *td, struct __mac_set_link_args *uap)
{
	struct label *intlabel;
	struct nameidata nd;
	struct mount *mp;
	struct mac mac;
	char *buffer;
	int vfslocked, error;

	error = copyin(uap->mac_p, &mac, sizeof(mac));
	if (error)
		return (error);

	error = mac_check_structmac_consistent(&mac);
	if (error)
		return (error);

	buffer = malloc(mac.m_buflen, M_MACTEMP, M_WAITOK);
	error = copyinstr(mac.m_string, buffer, mac.m_buflen, NULL);
	if (error) {
		free(buffer, M_MACTEMP);
		return (error);
	}

	intlabel = mac_vnode_label_alloc();
	error = mac_internalize_vnode_label(intlabel, buffer);
	free(buffer, M_MACTEMP);
	if (error)
		goto out;

	NDINIT(&nd, LOOKUP, MPSAFE | LOCKLEAF | NOFOLLOW, UIO_USERSPACE,
	    uap->path_p, td);
	error = namei(&nd);
	vfslocked = NDHASGIANT(&nd);
	if (error == 0) {
		error = vn_start_write(nd.ni_vp, &mp, V_WAIT | PCATCH);
		if (error == 0) {
			error = vn_setlabel(nd.ni_vp, intlabel,
			    td->td_ucred);
			vn_finished_write(mp);
		}
	}

	NDFREE(&nd, 0);
	VFS_UNLOCK_GIANT(vfslocked);
out:
	mac_vnode_label_free(intlabel);
	return (error);
}

/*
 * MPSAFE
 *
 * mac_syscall(2): multiplex a policy-specific system call to the named
 * policy's mpo_syscall method, if it provides one.
 */
int
mac_syscall(struct thread *td, struct mac_syscall_args *uap)
{
	struct mac_policy_conf *mpc;
	char target[MAC_MAX_POLICY_NAME];
	int entrycount, error;

	error = copyinstr(uap->policy, target, sizeof(target), NULL);
	if (error)
		return (error);

	error = ENOSYS;
	/* Static policies may be entered without busying the list. */
	LIST_FOREACH(mpc, &mac_static_policy_list, mpc_list) {
		if (strcmp(mpc->mpc_name, target) == 0 &&
		    mpc->mpc_ops->mpo_syscall != NULL) {
			error = mpc->mpc_ops->mpo_syscall(td,
			    uap->call, uap->arg);
			goto out;
		}
	}

	/* Dynamic policies require a busy reference while entered. */
	if ((entrycount = mac_policy_list_conditional_busy()) != 0) {
		LIST_FOREACH(mpc, &mac_policy_list, mpc_list) {
			if (strcmp(mpc->mpc_name, target) == 0 &&
			    mpc->mpc_ops->mpo_syscall != NULL) {
				error = mpc->mpc_ops->mpo_syscall(td,
				    uap->call, uap->arg);
				break;
			}
		}
		mac_policy_list_unbusy();
	}
out:
	return (error);
}

SYSINIT(mac, SI_SUB_MAC, SI_ORDER_FIRST, mac_init, NULL);
SYSINIT(mac_late, SI_SUB_MAC_LATE, SI_ORDER_FIRST, mac_late_init, NULL);

#else /* !MAC */

/*
 * System call stubs returning ENOSYS when the kernel is compiled without
 * "options MAC".
 */
int
__mac_get_pid(struct thread *td, struct __mac_get_pid_args *uap)
{

	return (ENOSYS);
}

int
__mac_get_proc(struct thread *td, struct __mac_get_proc_args *uap)
{

	return (ENOSYS);
}

int
__mac_set_proc(struct thread *td, struct __mac_set_proc_args *uap)
{

	return (ENOSYS);
}

int
__mac_get_fd(struct thread *td, struct __mac_get_fd_args *uap)
{

	return (ENOSYS);
}

int
__mac_get_file(struct thread *td, struct __mac_get_file_args *uap)
{

	return (ENOSYS);
}

int
__mac_get_link(struct thread *td, struct __mac_get_link_args *uap)
{

	return (ENOSYS);
}

int
__mac_set_fd(struct thread *td, struct __mac_set_fd_args *uap)
{

	return (ENOSYS);
}

int
__mac_set_file(struct thread *td, struct __mac_set_file_args *uap)
{

	return (ENOSYS);
}

int
__mac_set_link(struct thread *td, struct __mac_set_link_args *uap)
{

	return (ENOSYS);
}

int
mac_syscall(struct thread *td, struct mac_syscall_args *uap)
{

	return (ENOSYS);
}

#endif /* !MAC */