/* audit_worker.c — FreeBSD audit worker thread, revision 159261 */
1156888Srwatson/* 2156888Srwatson * Copyright (c) 1999-2005 Apple Computer, Inc. 3156888Srwatson * Copyright (c) 2006 Robert N. M. Watson 4156888Srwatson * All rights reserved. 5156888Srwatson * 6156888Srwatson * Redistribution and use in source and binary forms, with or without 7156888Srwatson * modification, are permitted provided that the following conditions 8156888Srwatson * are met: 9156888Srwatson * 1. Redistributions of source code must retain the above copyright 10156888Srwatson * notice, this list of conditions and the following disclaimer. 11156888Srwatson * 2. Redistributions in binary form must reproduce the above copyright 12156888Srwatson * notice, this list of conditions and the following disclaimer in the 13156888Srwatson * documentation and/or other materials provided with the distribution. 14156888Srwatson * 3. Neither the name of Apple Computer, Inc. ("Apple") nor the names of 15156888Srwatson * its contributors may be used to endorse or promote products derived 16156888Srwatson * from this software without specific prior written permission. 17156888Srwatson * 18156888Srwatson * THIS SOFTWARE IS PROVIDED BY APPLE AND ITS CONTRIBUTORS "AS IS" AND 19156888Srwatson * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 20156888Srwatson * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 21156888Srwatson * ARE DISCLAIMED. 
IN NO EVENT SHALL APPLE OR ITS CONTRIBUTORS BE LIABLE FOR 22156888Srwatson * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 23156888Srwatson * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS 24156888Srwatson * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 25156888Srwatson * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, 26156888Srwatson * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING 27156888Srwatson * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE 28156888Srwatson * POSSIBILITY OF SUCH DAMAGE. 29156888Srwatson * 30156888Srwatson * $FreeBSD: head/sys/security/audit/audit_worker.c 159261 2006-06-05 13:43:57Z rwatson $ 31156888Srwatson */ 32156888Srwatson 33156888Srwatson#include <sys/param.h> 34156888Srwatson#include <sys/condvar.h> 35156888Srwatson#include <sys/conf.h> 36156888Srwatson#include <sys/file.h> 37156888Srwatson#include <sys/filedesc.h> 38156888Srwatson#include <sys/fcntl.h> 39156888Srwatson#include <sys/ipc.h> 40156888Srwatson#include <sys/kernel.h> 41156888Srwatson#include <sys/kthread.h> 42156888Srwatson#include <sys/malloc.h> 43156888Srwatson#include <sys/mount.h> 44156888Srwatson#include <sys/namei.h> 45156888Srwatson#include <sys/proc.h> 46156888Srwatson#include <sys/queue.h> 47156888Srwatson#include <sys/socket.h> 48156888Srwatson#include <sys/socketvar.h> 49156888Srwatson#include <sys/protosw.h> 50156888Srwatson#include <sys/domain.h> 51156888Srwatson#include <sys/sysproto.h> 52156888Srwatson#include <sys/sysent.h> 53156888Srwatson#include <sys/systm.h> 54156888Srwatson#include <sys/ucred.h> 55156888Srwatson#include <sys/uio.h> 56156888Srwatson#include <sys/un.h> 57156888Srwatson#include <sys/unistd.h> 58156888Srwatson#include <sys/vnode.h> 59156888Srwatson 60156888Srwatson#include <bsm/audit.h> 61156888Srwatson#include <bsm/audit_internal.h> 62156888Srwatson#include <bsm/audit_kevents.h> 63156888Srwatson 
64156888Srwatson#include <netinet/in.h> 65156888Srwatson#include <netinet/in_pcb.h> 66156888Srwatson 67156888Srwatson#include <security/audit/audit.h> 68156888Srwatson#include <security/audit/audit_private.h> 69156888Srwatson 70156888Srwatson#include <vm/uma.h> 71156888Srwatson 72156888Srwatson/* 73156888Srwatson * Worker thread that will schedule disk I/O, etc. 74156889Srwatson */ 75156888Srwatsonstatic struct proc *audit_thread; 76156888Srwatson 77156888Srwatson/* 78156889Srwatson * When an audit log is rotated, the actual rotation must be performed by the 79156889Srwatson * audit worker thread, as it may have outstanding writes on the current 80156889Srwatson * audit log. audit_replacement_vp holds the vnode replacing the current 81156889Srwatson * vnode. We can't let more than one replacement occur at a time, so if more 82156889Srwatson * than one thread requests a replacement, only one can have the replacement 83156889Srwatson * "in progress" at any given moment. If a thread tries to replace the audit 84156889Srwatson * vnode and discovers a replacement is already in progress (i.e., 85156889Srwatson * audit_replacement_flag != 0), then it will sleep on audit_replacement_cv 86156889Srwatson * waiting its turn to perform a replacement. When a replacement is 87156889Srwatson * completed, this cv is signalled by the worker thread so a waiting thread 88156889Srwatson * can start another replacement. We also store a credential to perform 89156889Srwatson * audit log write operations with. 90156888Srwatson * 91156888Srwatson * The current credential and vnode are thread-local to audit_worker. 92156888Srwatson */ 93156888Srwatsonstatic struct cv audit_replacement_cv; 94156888Srwatson 95156888Srwatsonstatic int audit_replacement_flag; 96156888Srwatsonstatic struct vnode *audit_replacement_vp; 97156888Srwatsonstatic struct ucred *audit_replacement_cred; 98156888Srwatson 99156888Srwatson/* 100156888Srwatson * Flags related to Kernel->user-space communication. 
101156888Srwatson */ 102156888Srwatsonstatic int audit_file_rotate_wait; 103156888Srwatson 104156888Srwatson/* 105156888Srwatson * XXXAUDIT: Should adjust comments below to make it clear that we get to 106156889Srwatson * this point only if we believe we have storage, so not having space here is 107156889Srwatson * a violation of invariants derived from administrative procedures. I.e., 108156889Srwatson * someone else has written to the audit partition, leaving less space than 109156889Srwatson * we accounted for. 110156888Srwatson */ 111156888Srwatsonstatic int 112156889Srwatsonaudit_record_write(struct vnode *vp, struct kaudit_record *ar, 113156888Srwatson struct ucred *cred, struct thread *td) 114156888Srwatson{ 115156888Srwatson int ret; 116156888Srwatson long temp; 117156888Srwatson struct au_record *bsm; 118156888Srwatson struct vattr vattr; 119156888Srwatson struct statfs *mnt_stat = &vp->v_mount->mnt_stat; 120156888Srwatson int vfslocked; 121156888Srwatson 122156888Srwatson vfslocked = VFS_LOCK_GIANT(vp->v_mount); 123156888Srwatson 124156888Srwatson /* 125156889Srwatson * First, gather statistics on the audit log file and file system so 126156889Srwatson * that we know how we're doing on space. In both cases, if we're 127156889Srwatson * unable to perform the operation, we drop the record and return. 128156889Srwatson * However, this is arguably an assertion failure. 129156888Srwatson * XXX Need a FreeBSD equivalent. 
130156888Srwatson */ 131156888Srwatson ret = VFS_STATFS(vp->v_mount, mnt_stat, td); 132156888Srwatson if (ret) 133156888Srwatson goto out; 134156888Srwatson 135156888Srwatson vn_lock(vp, LK_EXCLUSIVE | LK_RETRY, td); 136156888Srwatson ret = VOP_GETATTR(vp, &vattr, cred, td); 137156888Srwatson VOP_UNLOCK(vp, 0, td); 138156888Srwatson if (ret) 139156888Srwatson goto out; 140156888Srwatson 141156888Srwatson /* update the global stats struct */ 142156889Srwatson audit_fstat.af_currsz = vattr.va_size; 143156888Srwatson 144156888Srwatson /* 145156888Srwatson * XXX Need to decide what to do if the trigger to the audit daemon 146156888Srwatson * fails. 147156888Srwatson */ 148156888Srwatson 149156889Srwatson /* 150156888Srwatson * If we fall below minimum free blocks (hard limit), tell the audit 151156888Srwatson * daemon to force a rotation off of the file system. We also stop 152156889Srwatson * writing, which means this audit record is probably lost. If we 153156889Srwatson * fall below the minimum percent free blocks (soft limit), then 154156889Srwatson * kindly suggest to the audit daemon to do something. 155156888Srwatson */ 156156888Srwatson if (mnt_stat->f_bfree < AUDIT_HARD_LIMIT_FREE_BLOCKS) { 157156888Srwatson (void)send_trigger(AUDIT_TRIGGER_NO_SPACE); 158156889Srwatson /* 159156889Srwatson * Hopefully userspace did something about all the previous 160156888Srwatson * triggers that were sent prior to this critical condition. 161156888Srwatson * If fail-stop is set, then we're done; goodnight Gracie. 162156888Srwatson */ 163156888Srwatson if (audit_fail_stop) 164156888Srwatson panic("Audit log space exhausted and fail-stop set."); 165156888Srwatson else { 166156888Srwatson audit_suspended = 1; 167156888Srwatson ret = ENOSPC; 168156888Srwatson goto out; 169156888Srwatson } 170156888Srwatson } else 171156889Srwatson /* 172156889Srwatson * Send a message to the audit daemon that disk space is 173156889Srwatson * getting low. 
174156888Srwatson * 175156888Srwatson * XXXAUDIT: Check math and block size calculation here. 176156888Srwatson */ 177156888Srwatson if (audit_qctrl.aq_minfree != 0) { 178156889Srwatson temp = mnt_stat->f_blocks / (100 / 179156888Srwatson audit_qctrl.aq_minfree); 180156888Srwatson if (mnt_stat->f_bfree < temp) 181156888Srwatson (void)send_trigger(AUDIT_TRIGGER_LOW_SPACE); 182156888Srwatson } 183156888Srwatson 184156889Srwatson /* 185156889Srwatson * Check if the current log file is full; if so, call for a log 186156889Srwatson * rotate. This is not an exact comparison; we may write some records 187156889Srwatson * over the limit. If that's not acceptable, then add a fudge factor 188156889Srwatson * here. 189156888Srwatson */ 190156888Srwatson if ((audit_fstat.af_filesz != 0) && 191156889Srwatson (audit_file_rotate_wait == 0) && 192156888Srwatson (vattr.va_size >= audit_fstat.af_filesz)) { 193156888Srwatson audit_file_rotate_wait = 1; 194156888Srwatson (void)send_trigger(AUDIT_TRIGGER_OPEN_NEW); 195156888Srwatson } 196156888Srwatson 197156888Srwatson /* 198156888Srwatson * If the estimated amount of audit data in the audit event queue 199156889Srwatson * (plus records allocated but not yet queued) has reached the amount 200156889Srwatson * of free space on the disk, then we need to go into an audit fail 201156889Srwatson * stop state, in which we do not permit the allocation/committing of 202156889Srwatson * any new audit records. We continue to process packets but don't 203156889Srwatson * allow any activities that might generate new records. In the 204156889Srwatson * future, we might want to detect when space is available again and 205156889Srwatson * allow operation to continue, but this behavior is sufficient to 206156889Srwatson * meet fail stop requirements in CAPP. 
207156888Srwatson */ 208156888Srwatson if (audit_fail_stop && 209156888Srwatson (unsigned long) 210156888Srwatson ((audit_q_len + audit_pre_q_len + 1) * MAX_AUDIT_RECORD_SIZE) / 211156888Srwatson mnt_stat->f_bsize >= (unsigned long)(mnt_stat->f_bfree)) { 212156888Srwatson printf("audit_record_write: free space below size of audit " 213156888Srwatson "queue, failing stop\n"); 214156888Srwatson audit_in_failure = 1; 215156888Srwatson } 216156888Srwatson 217156889Srwatson /* 218156888Srwatson * If there is a user audit record attached to the kernel record, 219156888Srwatson * then write the user record. 220156889Srwatson * 221156889Srwatson * XXX Need to decide a few things here: IF the user audit record is 222156889Srwatson * written, but the write of the kernel record fails, what to do? 223156889Srwatson * Should the kernel record come before or after the user record? 224156889Srwatson * For now, we write the user record first, and we ignore errors. 225156888Srwatson */ 226156888Srwatson if (ar->k_ar_commit & AR_COMMIT_USER) { 227156888Srwatson /* 228156888Srwatson * Try submitting the record to any active audit pipes. 229156888Srwatson */ 230156888Srwatson audit_pipe_submit((void *)ar->k_udata, ar->k_ulen); 231156888Srwatson 232156888Srwatson /* 233156888Srwatson * And to disk. 234156888Srwatson */ 235156888Srwatson ret = vn_rdwr(UIO_WRITE, vp, (void *)ar->k_udata, ar->k_ulen, 236156889Srwatson (off_t)0, UIO_SYSSPACE, IO_APPEND|IO_UNIT, cred, NULL, 237156889Srwatson NULL, td); 238156888Srwatson if (ret) 239156888Srwatson goto out; 240156888Srwatson } 241156888Srwatson 242156889Srwatson /* 243156889Srwatson * Convert the internal kernel record to BSM format and write it out 244156889Srwatson * if everything's OK. 
245156888Srwatson */ 246156888Srwatson if (!(ar->k_ar_commit & AR_COMMIT_KERNEL)) { 247156888Srwatson ret = 0; 248156888Srwatson goto out; 249156888Srwatson } 250156888Srwatson 251156888Srwatson /* 252156888Srwatson * XXXAUDIT: Should we actually allow this conversion to fail? With 253156888Srwatson * sleeping memory allocation and invariants checks, perhaps not. 254156888Srwatson */ 255156888Srwatson ret = kaudit_to_bsm(ar, &bsm); 256156888Srwatson if (ret == BSM_NOAUDIT) { 257156888Srwatson ret = 0; 258156888Srwatson goto out; 259156888Srwatson } 260156888Srwatson 261156888Srwatson /* 262156889Srwatson * XXX: We drop the record on BSM conversion failure, but really this 263156889Srwatson * is an assertion failure. 264156888Srwatson */ 265156888Srwatson if (ret == BSM_FAILURE) { 266156888Srwatson AUDIT_PRINTF(("BSM conversion failure\n")); 267156888Srwatson ret = EINVAL; 268156888Srwatson goto out; 269156888Srwatson } 270156888Srwatson 271156888Srwatson /* 272156888Srwatson * Try submitting the record to any active audit pipes. 273156888Srwatson */ 274156888Srwatson audit_pipe_submit((void *)bsm->data, bsm->len); 275156889Srwatson 276156888Srwatson /* 277156889Srwatson * XXX We should break the write functionality away from the BSM 278156889Srwatson * record generation and have the BSM generation done before this 279156889Srwatson * function is called. This function will then take the BSM record as 280156889Srwatson * a parameter. 281156888Srwatson */ 282156889Srwatson ret = (vn_rdwr(UIO_WRITE, vp, (void *)bsm->data, bsm->len, (off_t)0, 283156889Srwatson UIO_SYSSPACE, IO_APPEND|IO_UNIT, cred, NULL, NULL, td)); 284156888Srwatson kau_free(bsm); 285156888Srwatson 286156888Srwatsonout: 287156888Srwatson /* 288156889Srwatson * When we're done processing the current record, we have to check to 289156889Srwatson * see if we're in a failure mode, and if so, whether this was the 290156889Srwatson * last record left to be drained. 
If we're done draining, then we 291156889Srwatson * fsync the vnode and panic. 292156888Srwatson */ 293156889Srwatson if (audit_in_failure && audit_q_len == 0 && audit_pre_q_len == 0) { 294156888Srwatson VOP_LOCK(vp, LK_DRAIN | LK_INTERLOCK, td); 295156888Srwatson (void)VOP_FSYNC(vp, MNT_WAIT, td); 296156888Srwatson VOP_UNLOCK(vp, 0, td); 297156888Srwatson panic("Audit store overflow; record queue drained."); 298156888Srwatson } 299156888Srwatson 300156888Srwatson VFS_UNLOCK_GIANT(vfslocked); 301156888Srwatson 302156888Srwatson return (ret); 303156888Srwatson} 304156888Srwatson 305156888Srwatson/* 306156888Srwatson * If an appropriate signal has been received rotate the audit log based on 307156888Srwatson * the global replacement variables. Signal consumers as needed that the 308156888Srwatson * rotation has taken place. 309156888Srwatson * 310156888Srwatson * XXXRW: The global variables and CVs used to signal the audit_worker to 311156888Srwatson * perform a rotation are essentially a message queue of depth 1. It would 312156888Srwatson * be much nicer to actually use a message queue. 
313156888Srwatson */ 314156888Srwatsonstatic void 315156888Srwatsonaudit_worker_rotate(struct ucred **audit_credp, struct vnode **audit_vpp, 316156888Srwatson struct thread *audit_td) 317156888Srwatson{ 318156888Srwatson int do_replacement_signal, vfslocked; 319156888Srwatson struct ucred *old_cred; 320156888Srwatson struct vnode *old_vp; 321156888Srwatson 322156888Srwatson mtx_assert(&audit_mtx, MA_OWNED); 323156888Srwatson 324156888Srwatson do_replacement_signal = 0; 325156888Srwatson while (audit_replacement_flag != 0) { 326156888Srwatson old_cred = *audit_credp; 327156888Srwatson old_vp = *audit_vpp; 328156888Srwatson *audit_credp = audit_replacement_cred; 329156888Srwatson *audit_vpp = audit_replacement_vp; 330156888Srwatson audit_replacement_cred = NULL; 331156888Srwatson audit_replacement_vp = NULL; 332156888Srwatson audit_replacement_flag = 0; 333156888Srwatson 334156888Srwatson audit_enabled = (*audit_vpp != NULL); 335156888Srwatson 336156888Srwatson /* 337156888Srwatson * XXX: What to do about write failures here? 
338156888Srwatson */ 339156888Srwatson if (old_vp != NULL) { 340156888Srwatson AUDIT_PRINTF(("Closing old audit file\n")); 341156888Srwatson mtx_unlock(&audit_mtx); 342156888Srwatson vfslocked = VFS_LOCK_GIANT(old_vp->v_mount); 343156888Srwatson vn_close(old_vp, AUDIT_CLOSE_FLAGS, old_cred, 344156888Srwatson audit_td); 345156888Srwatson VFS_UNLOCK_GIANT(vfslocked); 346156888Srwatson crfree(old_cred); 347156888Srwatson mtx_lock(&audit_mtx); 348156888Srwatson old_cred = NULL; 349156888Srwatson old_vp = NULL; 350156888Srwatson AUDIT_PRINTF(("Audit file closed\n")); 351156888Srwatson } 352156888Srwatson if (*audit_vpp != NULL) { 353156888Srwatson AUDIT_PRINTF(("Opening new audit file\n")); 354156888Srwatson } 355156888Srwatson do_replacement_signal = 1; 356156888Srwatson } 357156888Srwatson 358156888Srwatson /* 359156888Srwatson * Signal that replacement have occurred to wake up and 360156888Srwatson * start any other replacements started in parallel. We can 361156888Srwatson * continue about our business in the mean time. We 362156888Srwatson * broadcast so that both new replacements can be inserted, 363156888Srwatson * but also so that the source(s) of replacement can return 364156888Srwatson * successfully. 365156888Srwatson */ 366156888Srwatson if (do_replacement_signal) 367156888Srwatson cv_broadcast(&audit_replacement_cv); 368156888Srwatson} 369156888Srwatson 370156888Srwatson/* 371156888Srwatson * Drain the audit commit queue and free the records. Used if there are 372156888Srwatson * records present, but no audit log target. 
373156888Srwatson */ 374156888Srwatsonstatic void 375156888Srwatsonaudit_worker_drain(void) 376156888Srwatson{ 377156888Srwatson struct kaudit_record *ar; 378156888Srwatson 379156888Srwatson while ((ar = TAILQ_FIRST(&audit_q))) { 380156888Srwatson TAILQ_REMOVE(&audit_q, ar, k_q); 381156888Srwatson audit_free(ar); 382156888Srwatson audit_q_len--; 383156888Srwatson } 384156888Srwatson} 385156888Srwatson 386156888Srwatson/* 387156888Srwatson * The audit_worker thread is responsible for watching the event queue, 388156888Srwatson * dequeueing records, converting them to BSM format, and committing them to 389156888Srwatson * disk. In order to minimize lock thrashing, records are dequeued in sets 390156888Srwatson * to a thread-local work queue. In addition, the audit_work performs the 391156888Srwatson * actual exchange of audit log vnode pointer, as audit_vp is a thread-local 392156888Srwatson * variable. 393156888Srwatson */ 394156888Srwatsonstatic void 395156888Srwatsonaudit_worker(void *arg) 396156888Srwatson{ 397156888Srwatson TAILQ_HEAD(, kaudit_record) ar_worklist; 398156888Srwatson struct kaudit_record *ar; 399156888Srwatson struct ucred *audit_cred; 400156888Srwatson struct thread *audit_td; 401156888Srwatson struct vnode *audit_vp; 402156888Srwatson int error, lowater_signal; 403156888Srwatson 404156888Srwatson AUDIT_PRINTF(("audit_worker starting\n")); 405156888Srwatson 406156888Srwatson /* 407156888Srwatson * These are thread-local variables requiring no synchronization. 408156888Srwatson */ 409156888Srwatson TAILQ_INIT(&ar_worklist); 410156888Srwatson audit_cred = NULL; 411156888Srwatson audit_td = curthread; 412156888Srwatson audit_vp = NULL; 413156888Srwatson 414156888Srwatson mtx_lock(&audit_mtx); 415156888Srwatson while (1) { 416156888Srwatson mtx_assert(&audit_mtx, MA_OWNED); 417156888Srwatson 418156888Srwatson /* 419156888Srwatson * Wait for record or rotation events. 
420156888Srwatson */ 421156888Srwatson while (!audit_replacement_flag && TAILQ_EMPTY(&audit_q)) { 422156888Srwatson AUDIT_PRINTF(("audit_worker waiting\n")); 423159261Srwatson cv_wait(&audit_worker_cv, &audit_mtx); 424156888Srwatson AUDIT_PRINTF(("audit_worker woken up\n")); 425156888Srwatson AUDIT_PRINTF(("audit_worker: new vp = %p; value of " 426156888Srwatson "flag %d\n", audit_replacement_vp, 427156888Srwatson audit_replacement_flag)); 428156888Srwatson } 429156888Srwatson 430156888Srwatson /* 431156888Srwatson * First priority: replace the audit log target if requested. 432156888Srwatson */ 433156888Srwatson audit_worker_rotate(&audit_cred, &audit_vp, audit_td); 434156888Srwatson 435156888Srwatson /* 436156888Srwatson * If we have records, but there's no active vnode to write 437156888Srwatson * to, drain the record queue. Generally, we prevent the 438156888Srwatson * unnecessary allocation of records elsewhere, but we need 439156888Srwatson * to allow for races between conditional allocation and 440156888Srwatson * queueing. Go back to waiting when we're done. 441156888Srwatson */ 442156888Srwatson if (audit_vp == NULL) { 443156888Srwatson audit_worker_drain(); 444156888Srwatson continue; 445156888Srwatson } 446156888Srwatson 447156888Srwatson /* 448156888Srwatson * We have both records to write and an active vnode to write 449156888Srwatson * to. Dequeue a record, and start the write. Eventually, 450156888Srwatson * it might make sense to dequeue several records and perform 451156888Srwatson * our own clustering, if the lower layers aren't doing it 452156888Srwatson * automatically enough. 
453156888Srwatson */ 454156888Srwatson lowater_signal = 0; 455156888Srwatson while ((ar = TAILQ_FIRST(&audit_q))) { 456156888Srwatson TAILQ_REMOVE(&audit_q, ar, k_q); 457156888Srwatson audit_q_len--; 458156888Srwatson if (audit_q_len == audit_qctrl.aq_lowater) 459156888Srwatson lowater_signal++; 460156888Srwatson TAILQ_INSERT_TAIL(&ar_worklist, ar, k_q); 461156888Srwatson } 462156888Srwatson if (lowater_signal) 463159261Srwatson cv_broadcast(&audit_watermark_cv); 464156888Srwatson 465156888Srwatson mtx_unlock(&audit_mtx); 466156888Srwatson while ((ar = TAILQ_FIRST(&ar_worklist))) { 467156888Srwatson TAILQ_REMOVE(&ar_worklist, ar, k_q); 468156888Srwatson if (audit_vp != NULL) { 469156889Srwatson error = audit_record_write(audit_vp, ar, 470156888Srwatson audit_cred, audit_td); 471156888Srwatson if (error && audit_panic_on_write_fail) 472156888Srwatson panic("audit_worker: write error %d\n", 473156888Srwatson error); 474156888Srwatson else if (error) 475156888Srwatson printf("audit_worker: write error %d\n", 476156888Srwatson error); 477156888Srwatson } 478156888Srwatson audit_free(ar); 479156888Srwatson } 480156888Srwatson mtx_lock(&audit_mtx); 481156888Srwatson } 482156888Srwatson} 483156888Srwatson 484156888Srwatson/* 485156888Srwatson * audit_rotate_vnode() is called by a user or kernel thread to configure or 486156888Srwatson * de-configure auditing on a vnode. The arguments are the replacement 487156888Srwatson * credential and vnode to substitute for the current credential and vnode, 488156888Srwatson * if any. If either is set to NULL, both should be NULL, and this is used 489156888Srwatson * to indicate that audit is being disabled. The real work is done in the 490156888Srwatson * audit_worker thread, but audit_rotate_vnode() waits synchronously for that 491156888Srwatson * to complete. 492156888Srwatson * 493156888Srwatson * The vnode should be referenced and opened by the caller. The credential 494156888Srwatson * should be referenced. 
audit_rotate_vnode() will own both references as of 495156888Srwatson * this call, so the caller should not release either. 496156888Srwatson * 497156888Srwatson * XXXAUDIT: Review synchronize communication logic. Really, this is a 498156888Srwatson * message queue of depth 1. 499156888Srwatson * 500156888Srwatson * XXXAUDIT: Enhance the comments below to indicate that we are basically 501156888Srwatson * acquiring ownership of the communications queue, inserting our message, 502156888Srwatson * and waiting for an acknowledgement. 503156888Srwatson */ 504156888Srwatsonvoid 505156888Srwatsonaudit_rotate_vnode(struct ucred *cred, struct vnode *vp) 506156888Srwatson{ 507156888Srwatson 508156888Srwatson /* 509156888Srwatson * If other parallel log replacements have been requested, we wait 510156888Srwatson * until they've finished before continuing. 511156888Srwatson */ 512156888Srwatson mtx_lock(&audit_mtx); 513156888Srwatson while (audit_replacement_flag != 0) { 514156888Srwatson AUDIT_PRINTF(("audit_rotate_vnode: sleeping to wait for " 515156888Srwatson "flag\n")); 516156888Srwatson cv_wait(&audit_replacement_cv, &audit_mtx); 517156888Srwatson AUDIT_PRINTF(("audit_rotate_vnode: woken up (flag %d)\n", 518156888Srwatson audit_replacement_flag)); 519156888Srwatson } 520156888Srwatson audit_replacement_cred = cred; 521156888Srwatson audit_replacement_flag = 1; 522156888Srwatson audit_replacement_vp = vp; 523156888Srwatson 524156888Srwatson /* 525156888Srwatson * Wake up the audit worker to perform the exchange once we 526156888Srwatson * release the mutex. 527156888Srwatson */ 528159261Srwatson cv_signal(&audit_worker_cv); 529156888Srwatson 530156888Srwatson /* 531156888Srwatson * Wait for the audit_worker to broadcast that a replacement has 532156888Srwatson * taken place; we know that once this has happened, our vnode 533156888Srwatson * has been replaced in, so we can return successfully. 
534156888Srwatson */ 535156888Srwatson AUDIT_PRINTF(("audit_rotate_vnode: waiting for news of " 536156888Srwatson "replacement\n")); 537156888Srwatson cv_wait(&audit_replacement_cv, &audit_mtx); 538156888Srwatson AUDIT_PRINTF(("audit_rotate_vnode: change acknowledged by " 539156888Srwatson "audit_worker (flag " "now %d)\n", audit_replacement_flag)); 540156888Srwatson mtx_unlock(&audit_mtx); 541156888Srwatson 542156888Srwatson audit_file_rotate_wait = 0; /* We can now request another rotation */ 543156888Srwatson} 544156888Srwatson 545156888Srwatsonvoid 546156888Srwatsonaudit_worker_init(void) 547156888Srwatson{ 548156888Srwatson int error; 549156888Srwatson 550156888Srwatson cv_init(&audit_replacement_cv, "audit_replacement_cv"); 551156888Srwatson error = kthread_create(audit_worker, NULL, &audit_thread, RFHIGHPID, 552156888Srwatson 0, "audit_worker"); 553156888Srwatson if (error) 554156888Srwatson panic("audit_worker_init: kthread_create returned %d", error); 555156888Srwatson} 556