/*-
 * Copyright (c) 2008-2010 Apple Inc.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of Apple Inc. ("Apple") nor the names of
 *    its contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY APPLE AND ITS CONTRIBUTORS "AS IS" AND ANY
 * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
 * DISCLAIMED. IN NO EVENT SHALL APPLE OR ITS CONTRIBUTORS BE LIABLE FOR ANY
 * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
 * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <string.h>

#include <sys/kernel.h>
#include <sys/proc.h>
#include <sys/systm.h>

#include <kern/host.h>
#include <kern/kalloc.h>
#include <kern/locks.h>
#include <kern/sched_prim.h>

#include <libkern/OSAtomic.h>

#include <bsm/audit.h>
#include <bsm/audit_internal.h>

#include <security/audit/audit_bsd.h>
#include <security/audit/audit.h>
#include <security/audit/audit_private.h>

#include <mach/host_priv.h>
#include <mach/host_special_ports.h>
#include <mach/audit_triggers_server.h>

#if CONFIG_AUDIT
struct mhdr {
        size_t            mh_size;
        au_malloc_type_t *mh_type;
        u_long            mh_magic;
        char              mh_data[0];
};
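
/*
 * Layout sketch (illustrative): _audit_malloc() hands mh_data to the
 * caller, and _audit_free() recovers this bookkeeping header by stepping
 * back one struct mhdr from the pointer it is given:
 *
 *      +---------+---------+----------+------------------------+
 *      | mh_size | mh_type | mh_magic | mh_data[] (size bytes) |
 *      +---------+---------+----------+------------------------+
 *      ^                              ^
 *      start of kalloc() block        pointer seen by callers
 */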

/*
 * The lock group for the audit subsystem.
 */
static lck_grp_t *audit_lck_grp = NULL;

#define AUDIT_MHMAGIC   0x4D656C53

#if AUDIT_MALLOC_DEBUG
#define AU_MAX_SHORTDESC        20
#define AU_MAX_LASTCALLER       20
struct au_malloc_debug_info {
        SInt64          md_size;
        SInt64          md_maxsize;
        SInt32          md_inuse;
        SInt32          md_maxused;
        unsigned        md_type;
        unsigned        md_magic;
        char            md_shortdesc[AU_MAX_SHORTDESC];
        char            md_lastcaller[AU_MAX_LASTCALLER];
};
typedef struct au_malloc_debug_info au_malloc_debug_info_t;

au_malloc_type_t *audit_malloc_types[NUM_MALLOC_TYPES];

static int audit_sysctl_malloc_debug(struct sysctl_oid *oidp, void *arg1,
    int arg2, struct sysctl_req *req);

SYSCTL_PROC(_kern, OID_AUTO, audit_malloc_debug, CTLFLAG_RD, NULL, 0,
    audit_sysctl_malloc_debug, "S,audit_malloc_debug",
    "Current malloc debug info for auditing.");

#define AU_MALLOC_DBINFO_SZ \
    (NUM_MALLOC_TYPES * sizeof(au_malloc_debug_info_t))

/*
 * Copy out the malloc debug info via the sysctl interface.  The userland
 * code is something like the following:
 *
 *      error = sysctlbyname("kern.audit_malloc_debug", buffer_ptr,
 *          &buffer_len, NULL, 0);
 */
static int
audit_sysctl_malloc_debug(__unused struct sysctl_oid *oidp, __unused void *arg1,
    __unused int arg2, struct sysctl_req *req)
{
        int i;
        size_t sz;
        au_malloc_debug_info_t *amdi_ptr, *nxt_ptr;
        int err;

        /*
         * This provides a read-only node.
         */
        if (req->newptr != USER_ADDR_NULL)
                return (EPERM);

        /*
         * If just querying then return the space required.
         */
        if (req->oldptr == USER_ADDR_NULL) {
                req->oldidx = AU_MALLOC_DBINFO_SZ;
                return (0);
        }

        /*
         * Allocate a temporary buffer.
         */
        if (req->oldlen < AU_MALLOC_DBINFO_SZ)
                return (ENOMEM);
        amdi_ptr = (au_malloc_debug_info_t *)kalloc(AU_MALLOC_DBINFO_SZ);
        if (amdi_ptr == NULL)
                return (ENOMEM);
        bzero(amdi_ptr, AU_MALLOC_DBINFO_SZ);

        /*
         * Build the record array.
         */
        sz = 0;
        nxt_ptr = amdi_ptr;
        for (i = 0; i < NUM_MALLOC_TYPES; i++) {
                if (audit_malloc_types[i] == NULL)
                        continue;
                if (audit_malloc_types[i]->mt_magic != M_MAGIC) {
                        nxt_ptr->md_magic = audit_malloc_types[i]->mt_magic;
                        continue;
                }
                nxt_ptr->md_magic = audit_malloc_types[i]->mt_magic;
                nxt_ptr->md_size = audit_malloc_types[i]->mt_size;
                nxt_ptr->md_maxsize = audit_malloc_types[i]->mt_maxsize;
                nxt_ptr->md_inuse = (int)audit_malloc_types[i]->mt_inuse;
                nxt_ptr->md_maxused = (int)audit_malloc_types[i]->mt_maxused;
                strlcpy(nxt_ptr->md_shortdesc,
                    audit_malloc_types[i]->mt_shortdesc, AU_MAX_SHORTDESC - 1);
                strlcpy(nxt_ptr->md_lastcaller,
                    audit_malloc_types[i]->mt_lastcaller, AU_MAX_LASTCALLER - 1);
                sz += sizeof(au_malloc_debug_info_t);
                nxt_ptr++;
        }

        req->oldlen = sz;
        err = SYSCTL_OUT(req, amdi_ptr, sz);
        kfree(amdi_ptr, AU_MALLOC_DBINFO_SZ);

        return (err);
}
#endif /* AUDIT_MALLOC_DEBUG */
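
/*
 * A fuller userland sketch for the sysctl above (illustrative only, not
 * part of the kernel build; variable names and error handling are
 * assumptions): query the required size with a NULL buffer, then fetch
 * the record array.  On return, len holds the bytes actually copied out,
 * so len / sizeof(au_malloc_debug_info_t) records are valid.
 *
 *      size_t len = 0;
 *      au_malloc_debug_info_t *buf;
 *
 *      if (sysctlbyname("kern.audit_malloc_debug", NULL, &len, NULL, 0) != 0)
 *              err(1, "sysctlbyname");
 *      if ((buf = malloc(len)) == NULL)
 *              err(1, "malloc");
 *      if (sysctlbyname("kern.audit_malloc_debug", buf, &len, NULL, 0) != 0)
 *              err(1, "sysctlbyname");
 */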

/*
 * BSD malloc()
 *
 * If the M_NOWAIT flag is set then the allocation may not block, and NULL
 * is returned if memory is unavailable.  If the M_ZERO flag is set then
 * the buffer is zeroed.
 */
void *
#if AUDIT_MALLOC_DEBUG
_audit_malloc(size_t size, au_malloc_type_t *type, int flags, const char *fn)
#else
_audit_malloc(size_t size, au_malloc_type_t *type, int flags)
#endif
{
        struct mhdr *hdr;
        size_t memsize = sizeof (*hdr) + size;

        if (size == 0)
                return (NULL);
        if (flags & M_NOWAIT) {
                hdr = (void *)kalloc_noblock(memsize);
        } else {
                hdr = (void *)kalloc(memsize);
                if (hdr == NULL)
                        panic("_audit_malloc: kernel memory exhausted");
        }
        if (hdr == NULL)
                return (NULL);
        hdr->mh_size = memsize;
        hdr->mh_type = type;
        hdr->mh_magic = AUDIT_MHMAGIC;
        if (flags & M_ZERO)
                memset(hdr->mh_data, 0, size);
#if AUDIT_MALLOC_DEBUG
        if (type != NULL && type->mt_type < NUM_MALLOC_TYPES) {
                OSAddAtomic64(memsize, &type->mt_size);
                type->mt_maxsize = max(type->mt_size, type->mt_maxsize);
                OSAddAtomic(1, &type->mt_inuse);
                type->mt_maxused = max(type->mt_inuse, type->mt_maxused);
                type->mt_lastcaller = fn;
                audit_malloc_types[type->mt_type] = type;
        }
#endif /* AUDIT_MALLOC_DEBUG */
        return (hdr->mh_data);
}

/*
 * BSD free()
 */
void
#if AUDIT_MALLOC_DEBUG
_audit_free(void *addr, au_malloc_type_t *type)
#else
_audit_free(void *addr, __unused au_malloc_type_t *type)
#endif
{
        struct mhdr *hdr;

        if (addr == NULL)
                return;
        hdr = addr;
        hdr--;

        KASSERT(hdr->mh_magic == AUDIT_MHMAGIC,
            ("_audit_free(): hdr->mh_magic != AUDIT_MHMAGIC"));

#if AUDIT_MALLOC_DEBUG
        if (type != NULL) {
                OSAddAtomic64(-hdr->mh_size, &type->mt_size);
                OSAddAtomic(-1, &type->mt_inuse);
        }
#endif /* AUDIT_MALLOC_DEBUG */
        kfree(hdr, hdr->mh_size);
}

/*
 * Initialize a condition variable.  Must be called before use.
 */
void
_audit_cv_init(struct cv *cvp, const char *desc)
{

        if (desc == NULL)
                cvp->cv_description = "UNKNOWN";
        else
                cvp->cv_description = desc;
        cvp->cv_waiters = 0;
}

/*
 * Destroy a condition variable.
 */
void
_audit_cv_destroy(struct cv *cvp)
{

        cvp->cv_description = NULL;
        cvp->cv_waiters = 0;
}

/*
 * Signal a condition variable, waking up one waiting thread.
 */
void
_audit_cv_signal(struct cv *cvp)
{

        if (cvp->cv_waiters > 0) {
                wakeup_one((caddr_t)cvp);
                cvp->cv_waiters--;
        }
}

/*
 * Broadcast a signal to a condition variable, waking up all waiting threads.
 */
void
_audit_cv_broadcast(struct cv *cvp)
{

        if (cvp->cv_waiters > 0) {
                wakeup((caddr_t)cvp);
                cvp->cv_waiters = 0;
        }
}

/*
 * Wait on a condition variable.  A cv_signal or cv_broadcast on the same
 * condition variable will resume the thread.  It is recommended that the
 * mutex be held when cv_signal or cv_broadcast are called.
 */
void
_audit_cv_wait(struct cv *cvp, lck_mtx_t *mp, const char *desc)
{

        cvp->cv_waiters++;
        (void) msleep(cvp, mp, PZERO, desc, 0);
}

/*
 * Wait on a condition variable, allowing interruption by signals.  Returns 0
 * if the thread was resumed with cv_signal or cv_broadcast, or EINTR or
 * ERESTART if a signal was caught.  If ERESTART is returned the system call
 * should be restarted if possible.
 */
int
_audit_cv_wait_sig(struct cv *cvp, lck_mtx_t *mp, const char *desc)
{

        cvp->cv_waiters++;
        return (msleep(cvp, mp, PSOCK | PCATCH, desc, 0));
}
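
/*
 * Minimal usage sketch for the condition variable wrappers above
 * (illustrative; "example_mtx", "example_cv", and "ready" are
 * hypothetical).  The consumer re-checks its predicate in a loop with the
 * mutex held:
 *
 *      lck_mtx_lock(example_mtx.mtx_lock);
 *      while (!ready)
 *              _audit_cv_wait(&example_cv, example_mtx.mtx_lock, "example");
 *      lck_mtx_unlock(example_mtx.mtx_lock);
 *
 * The producer, also with the mutex held, sets the predicate and signals:
 *
 *      ready = 1;
 *      _audit_cv_signal(&example_cv);
 */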

/*
 * BSD Mutexes.
 */
void
#if DIAGNOSTIC
_audit_mtx_init(struct mtx *mp, const char *lckname)
#else
_audit_mtx_init(struct mtx *mp, __unused const char *lckname)
#endif
{
        mp->mtx_lock = lck_mtx_alloc_init(audit_lck_grp, LCK_ATTR_NULL);
        KASSERT(mp->mtx_lock != NULL,
            ("_audit_mtx_init: Could not allocate a mutex."));
#if DIAGNOSTIC
        strlcpy(mp->mtx_name, lckname, AU_MAX_LCK_NAME);
#endif
}

void
_audit_mtx_destroy(struct mtx *mp)
{

        if (mp->mtx_lock) {
                lck_mtx_free(mp->mtx_lock, audit_lck_grp);
                mp->mtx_lock = NULL;
        }
}

/*
 * BSD rw locks.
 */
void
#if DIAGNOSTIC
_audit_rw_init(struct rwlock *lp, const char *lckname)
#else
_audit_rw_init(struct rwlock *lp, __unused const char *lckname)
#endif
{
        lp->rw_lock = lck_rw_alloc_init(audit_lck_grp, LCK_ATTR_NULL);
        KASSERT(lp->rw_lock != NULL,
            ("_audit_rw_init: Could not allocate a rw lock."));
#if DIAGNOSTIC
        strlcpy(lp->rw_name, lckname, AU_MAX_LCK_NAME);
#endif
}

void
_audit_rw_destroy(struct rwlock *lp)
{

        if (lp->rw_lock) {
                lck_rw_free(lp->rw_lock, audit_lck_grp);
                lp->rw_lock = NULL;
        }
}

/*
 * Wait on a condition variable in a continuation (i.e. yield the kernel
 * stack).  A cv_signal or cv_broadcast on the same condition variable will
 * cause the thread to be scheduled.
 */
int
_audit_cv_wait_continuation(struct cv *cvp, lck_mtx_t *mp,
    thread_continue_t function)
{
        int status = KERN_SUCCESS;

        cvp->cv_waiters++;
        assert_wait(cvp, THREAD_UNINT);
        lck_mtx_unlock(mp);

        status = thread_block(function);

        /* Should not be reached, but just in case, re-lock. */
        lck_mtx_lock(mp);

        return (status);
}

/*
 * Simple recursive lock.
 */
void
#if DIAGNOSTIC
_audit_rlck_init(struct rlck *lp, const char *lckname)
#else
_audit_rlck_init(struct rlck *lp, __unused const char *lckname)
#endif
{

        lp->rl_mtx = lck_mtx_alloc_init(audit_lck_grp, LCK_ATTR_NULL);
        KASSERT(lp->rl_mtx != NULL,
            ("_audit_rlck_init: Could not allocate a recursive lock."));
#if DIAGNOSTIC
        strlcpy(lp->rl_name, lckname, AU_MAX_LCK_NAME);
#endif
        lp->rl_thread = 0;
        lp->rl_recurse = 0;
}

/*
 * Recursive lock.  Allow the same thread to recursively lock the same lock.
 */
void
_audit_rlck_lock(struct rlck *lp)
{

        if (lp->rl_thread == current_thread()) {
                OSAddAtomic(1, &lp->rl_recurse);
                KASSERT(lp->rl_recurse < 10000,
                    ("_audit_rlck_lock: lock nested too deep."));
        } else {
                lck_mtx_lock(lp->rl_mtx);
                lp->rl_thread = current_thread();
                lp->rl_recurse = 1;
        }
}

/*
 * Recursive unlock.  It should be the same thread that does the unlock.
 */
void
_audit_rlck_unlock(struct rlck *lp)
{
        KASSERT(lp->rl_thread == current_thread(),
            ("_audit_rlck_unlock(): Don't own lock."));

        /* Note: OSAddAtomic returns the old value. */
        if (OSAddAtomic(-1, &lp->rl_recurse) == 1) {
                lp->rl_thread = 0;
                lck_mtx_unlock(lp->rl_mtx);
        }
}

void
_audit_rlck_destroy(struct rlck *lp)
{

        if (lp->rl_mtx) {
                lck_mtx_free(lp->rl_mtx, audit_lck_grp);
                lp->rl_mtx = NULL;
        }
}
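
/*
 * Usage sketch for the recursive lock (illustrative; "example_rlck" is
 * hypothetical).  The owning thread may re-enter; the underlying mutex is
 * only released by the matching final unlock:
 *
 *      _audit_rlck_lock(&example_rlck);
 *      _audit_rlck_lock(&example_rlck);        (re-entry by the same thread)
 *      _audit_rlck_unlock(&example_rlck);
 *      _audit_rlck_unlock(&example_rlck);      (mutex released here)
 */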

/*
 * Recursive lock assert.
 */
void
_audit_rlck_assert(struct rlck *lp, u_int assert)
{
        thread_t cthd = current_thread();

        if (assert == LCK_MTX_ASSERT_OWNED && lp->rl_thread != cthd)
                panic("recursive lock (%p) not held by this thread (%p).",
                    lp, cthd);
        if (assert == LCK_MTX_ASSERT_NOTOWNED && lp->rl_thread != 0)
                panic("recursive lock (%p) held by thread (%p).",
                    lp, cthd);
}

/*
 * Simple sleep lock.
 */
void
#if DIAGNOSTIC
_audit_slck_init(struct slck *lp, const char *lckname)
#else
_audit_slck_init(struct slck *lp, __unused const char *lckname)
#endif
{

        lp->sl_mtx = lck_mtx_alloc_init(audit_lck_grp, LCK_ATTR_NULL);
        KASSERT(lp->sl_mtx != NULL,
            ("_audit_slck_init: Could not allocate a sleep lock."));
#if DIAGNOSTIC
        strlcpy(lp->sl_name, lckname, AU_MAX_LCK_NAME);
#endif
        lp->sl_locked = 0;
        lp->sl_waiting = 0;
}

/*
 * Sleep lock lock.  The 'intr' flag determines if the lock is interruptible.
 * If 'intr' is true then signals or other events can interrupt the sleep lock.
 */
wait_result_t
_audit_slck_lock(struct slck *lp, int intr)
{
        wait_result_t res = THREAD_AWAKENED;

        lck_mtx_lock(lp->sl_mtx);
        while (lp->sl_locked && res == THREAD_AWAKENED) {
                lp->sl_waiting = 1;
                res = lck_mtx_sleep(lp->sl_mtx, LCK_SLEEP_DEFAULT,
                    (event_t) lp, (intr) ? THREAD_INTERRUPTIBLE : THREAD_UNINT);
        }
        if (res == THREAD_AWAKENED)
                lp->sl_locked = 1;
        lck_mtx_unlock(lp->sl_mtx);

        return (res);
}

/*
 * Sleep lock unlock.  Wake up all the threads waiting for this lock.
 */
void
_audit_slck_unlock(struct slck *lp)
{

        lck_mtx_lock(lp->sl_mtx);
        lp->sl_locked = 0;
        if (lp->sl_waiting) {
                lp->sl_waiting = 0;

                /* Wake up *all* sleeping threads. */
                wakeup((event_t) lp);
        }
        lck_mtx_unlock(lp->sl_mtx);
}

/*
 * Sleep lock try.  Don't sleep if it doesn't get the lock.
 */
int
_audit_slck_trylock(struct slck *lp)
{
        int result;

        lck_mtx_lock(lp->sl_mtx);
        result = !lp->sl_locked;
        if (result)
                lp->sl_locked = 1;
        lck_mtx_unlock(lp->sl_mtx);

        return (result);
}

/*
 * Sleep lock assert.
 */
void
_audit_slck_assert(struct slck *lp, u_int assert)
{

        if (assert == LCK_MTX_ASSERT_OWNED && lp->sl_locked == 0)
                panic("sleep lock (%p) not held.", lp);
        if (assert == LCK_MTX_ASSERT_NOTOWNED && lp->sl_locked == 1)
                panic("sleep lock (%p) held.", lp);
}

void
_audit_slck_destroy(struct slck *lp)
{

        if (lp->sl_mtx) {
                lck_mtx_free(lp->sl_mtx, audit_lck_grp);
                lp->sl_mtx = NULL;
        }
}
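
/*
 * Usage sketch for the sleep lock (illustrative; "example_slck" is
 * hypothetical).  An interruptible acquire must check the wait result,
 * since a signal can end the sleep without granting the lock:
 *
 *      if (_audit_slck_lock(&example_slck, 1) == THREAD_AWAKENED) {
 *              (critical section)
 *              _audit_slck_unlock(&example_slck);
 *      }
 */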

/*
 * XXXss - This code was taken from bsd/netinet6/icmp6.c.  Maybe
 * ppsratecheck() should be made global in icmp6.c.
 */
#ifndef timersub
#define timersub(tvp, uvp, vvp)                                         \
        do {                                                            \
                (vvp)->tv_sec = (tvp)->tv_sec - (uvp)->tv_sec;          \
                (vvp)->tv_usec = (tvp)->tv_usec - (uvp)->tv_usec;       \
                if ((vvp)->tv_usec < 0) {                               \
                        (vvp)->tv_sec--;                                \
                        (vvp)->tv_usec += 1000000;                      \
                }                                                       \
        } while (0)
#endif

/*
 * Packets (or events) per second limitation.
 */
int
_audit_ppsratecheck(struct timeval *lasttime, int *curpps, int maxpps)
{
        struct timeval tv, delta;
        int rv;

        microtime(&tv);

        timersub(&tv, lasttime, &delta);

        /*
         * Check for 0,0 so that the message will be seen at least once.
         * If more than one second has passed since the last update of
         * lasttime, reset the counter.
         *
         * We increment *curpps even in the *curpps < maxpps case, as some
         * callers may want to use *curpps for stat purposes as well.
         */
        if ((lasttime->tv_sec == 0 && lasttime->tv_usec == 0) ||
            delta.tv_sec >= 1) {
                *lasttime = tv;
                *curpps = 0;
                rv = 1;
        } else if (maxpps < 0)
                rv = 1;
        else if (*curpps < maxpps)
                rv = 1;
        else
                rv = 0;
        if (*curpps + 1 > 0)
                *curpps = *curpps + 1;

        return (rv);
}

/*
 * Initialize the lock group for audit-related locks/mutexes.
 */
void
_audit_lck_grp_init(void)
{
        audit_lck_grp = lck_grp_alloc_init("Audit", LCK_GRP_ATTR_NULL);

        KASSERT(audit_lck_grp != NULL,
            ("audit_get_lck_grp: Could not allocate the audit lock group."));
}

int
audit_send_trigger(unsigned int trigger)
{
        mach_port_t audit_port;
        int error;

        error = host_get_audit_control_port(host_priv_self(), &audit_port);
        if (error == KERN_SUCCESS && audit_port != MACH_PORT_NULL) {
                audit_triggers(audit_port, trigger);
                return (0);
        } else {
                printf("Cannot get audit control port\n");
                return (error);
        }
}
#endif /* CONFIG_AUDIT */