audit_worker.c: diff of revision 180709 against revision 181053 (for each changed line, the 180709 version is shown first and the 181053 version second)
1/*
1/*-
2 * Copyright (c) 1999-2005 Apple Inc.
3 * Copyright (c) 2006-2008 Robert N. M. Watson
4 * All rights reserved.
5 *
6 * Redistribution and use in source and binary forms, with or without
7 * modification, are permitted provided that the following conditions
8 * are met:
9 * 1. Redistributions of source code must retain the above copyright
10 * notice, this list of conditions and the following disclaimer.
11 * 2. Redistributions in binary form must reproduce the above copyright
12 * notice, this list of conditions and the following disclaimer in the
13 * documentation and/or other materials provided with the distribution.
14 * 3. Neither the name of Apple Inc. ("Apple") nor the names of
15 * its contributors may be used to endorse or promote products derived
16 * from this software without specific prior written permission.
17 *
18 * THIS SOFTWARE IS PROVIDED BY APPLE AND ITS CONTRIBUTORS "AS IS" AND
19 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
20 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
21 * ARE DISCLAIMED. IN NO EVENT SHALL APPLE OR ITS CONTRIBUTORS BE LIABLE FOR
22 * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
23 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
24 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
25 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
26 * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
27 * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
28 * POSSIBILITY OF SUCH DAMAGE.
29 */
30
31#include <sys/cdefs.h>
32__FBSDID("$FreeBSD: head/sys/security/audit/audit_worker.c 180709 2008-07-22 16:44:48Z rwatson $");
32__FBSDID("$FreeBSD: head/sys/security/audit/audit_worker.c 181053 2008-07-31 09:54:35Z rwatson $");
33
34#include <sys/param.h>
35#include <sys/condvar.h>
36#include <sys/conf.h>
37#include <sys/file.h>
38#include <sys/filedesc.h>
39#include <sys/fcntl.h>
40#include <sys/ipc.h>
41#include <sys/kernel.h>
42#include <sys/kthread.h>
43#include <sys/malloc.h>
44#include <sys/mount.h>
45#include <sys/namei.h>
46#include <sys/proc.h>
47#include <sys/queue.h>
48#include <sys/socket.h>
49#include <sys/socketvar.h>
50#include <sys/protosw.h>
51#include <sys/domain.h>
52#include <sys/sx.h>
53#include <sys/sysproto.h>
54#include <sys/sysent.h>
55#include <sys/systm.h>
56#include <sys/ucred.h>
57#include <sys/uio.h>
58#include <sys/un.h>
59#include <sys/unistd.h>
60#include <sys/vnode.h>
61
62#include <bsm/audit.h>
63#include <bsm/audit_internal.h>
64#include <bsm/audit_kevents.h>
65
66#include <netinet/in.h>
67#include <netinet/in_pcb.h>
68
69#include <security/audit/audit.h>
70#include <security/audit/audit_private.h>
71
72#include <vm/uma.h>
73
74/*
75 * Worker thread that will schedule disk I/O, etc.
76 */
77static struct proc *audit_thread;
78
79/*
 80 * audit_cred and audit_vp are the stored credential and vnode to use for the
81 * active audit trail. They are protected by audit_worker_sx, which will be
82 * held across all I/O and all rotation to prevent them from being replaced
83 * (rotated) while in use. The audit_file_rotate_wait flag is set when the
84 * kernel has delivered a trigger to auditd to rotate the trail, and is
85 * cleared when the next rotation takes place. It is also protected by
86 * audit_worker_sx.
87 */
88static int audit_file_rotate_wait;
89static struct sx audit_worker_sx;
90static struct ucred *audit_cred;
91static struct vnode *audit_vp;
92
93/*
94 * Write an audit record to a file, performed as the last stage after both
95 * preselection and BSM conversion. Both space management and write failures
96 * are handled in this function.
97 *
98 * No attempt is made to deal with possible failure to deliver a trigger to
99 * the audit daemon, since the message is asynchronous anyway.
100 */
101static void
102audit_record_write(struct vnode *vp, struct ucred *cred, void *data,
103 size_t len)
104{
105 static struct timeval last_lowspace_trigger;
106 static struct timeval last_fail;
107 static int cur_lowspace_trigger;
108 struct statfs *mnt_stat;
109 int error, vfslocked;
110 static int cur_fail;
111 struct vattr vattr;
112 long temp;
113
114 sx_assert(&audit_worker_sx, SA_LOCKED); /* audit_file_rotate_wait. */
115
116 if (vp == NULL)
117 return;
118
119 mnt_stat = &vp->v_mount->mnt_stat;
120 vfslocked = VFS_LOCK_GIANT(vp->v_mount);
121
122 /*
123 * First, gather statistics on the audit log file and file system so
124 * that we know how we're doing on space. Consider failure of these
125 * operations to indicate a future inability to write to the file.
126 */
127 error = VFS_STATFS(vp->v_mount, mnt_stat, curthread);
128 if (error)
129 goto fail;
130 vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
131 error = VOP_GETATTR(vp, &vattr, cred, curthread);
132 VOP_UNLOCK(vp, 0);
133 if (error)
134 goto fail;
135 audit_fstat.af_currsz = vattr.va_size;
136
137 /*
138 * We handle four different space-related limits:
139 *
 140 * - A fixed (hard) limit on the minimum free blocks we require on
 141 * the file system; falling below it results in record loss, a
 142 * trigger, and possible fail stop due to violating invariants.
 143 *
 144 * - An administrative (soft) limit, which, when fallen below, results
 145 * in the kernel notifying the audit daemon of low space.
 146 *
 147 * - An audit trail size limit, which, when exceeded, results in the
 148 * kernel notifying the audit daemon that rotation is desired.
 149 *
 150 * - The worst-case size of the kernel audit record queue exceeding
 151 * free space, which can lead to a fail stop (with drain) in order
 152 * to prevent violating invariants.  Failure here doesn't halt
 153 * immediately, but prevents new records from being generated.
154 *
155 * Possibly, the last of these should be handled differently, always
156 * allowing a full queue to be lost, rather than trying to prevent
157 * loss.
158 *
159 * First, handle the hard limit, which generates a trigger and may
160 * fail stop. This is handled in the same manner as ENOSPC from
161 * VOP_WRITE, and results in record loss.
162 */
163 if (mnt_stat->f_bfree < AUDIT_HARD_LIMIT_FREE_BLOCKS) {
164 error = ENOSPC;
165 goto fail_enospc;
166 }
167
168 /*
169 * Second, handle falling below the soft limit, if defined; we send
170 * the daemon a trigger and continue processing the record. Triggers
171 * are limited to 1/sec.
172 */
173 if (audit_qctrl.aq_minfree != 0) {
174 temp = mnt_stat->f_blocks / (100 / audit_qctrl.aq_minfree);
175 if (mnt_stat->f_bfree < temp) {
176 if (ppsratecheck(&last_lowspace_trigger,
177 &cur_lowspace_trigger, 1)) {
178 (void)audit_send_trigger(
179 AUDIT_TRIGGER_LOW_SPACE);
180 printf("Warning: disk space low (< %d%% free) "
181 "on audit log file-system\n",
182 audit_qctrl.aq_minfree);
183 }
184 }
185 }
186
187 /*
188 * If the current file is getting full, generate a rotation trigger
189 * to the daemon. This is only approximate, which is fine as more
190 * records may be generated before the daemon rotates the file.
191 */
192 if ((audit_fstat.af_filesz != 0) && (audit_file_rotate_wait == 0) &&
193 (vattr.va_size >= audit_fstat.af_filesz)) {
194 sx_assert(&audit_worker_sx, SA_XLOCKED);
195
196 audit_file_rotate_wait = 1;
197 (void)audit_send_trigger(AUDIT_TRIGGER_ROTATE_KERNEL);
198 }
199
200 /*
201 * If the estimated amount of audit data in the audit event queue
202 * (plus records allocated but not yet queued) has reached the amount
203 * of free space on the disk, then we need to go into an audit fail
204 * stop state, in which we do not permit the allocation/committing of
205 * any new audit records. We continue to process records but don't
206 * allow any activities that might generate new records. In the
207 * future, we might want to detect when space is available again and
208 * allow operation to continue, but this behavior is sufficient to
209 * meet fail stop requirements in CAPP.
210 */
211 if (audit_fail_stop) {
212 if ((unsigned long)((audit_q_len + audit_pre_q_len + 1) *
213 MAX_AUDIT_RECORD_SIZE) / mnt_stat->f_bsize >=
214 (unsigned long)(mnt_stat->f_bfree)) {
215 if (ppsratecheck(&last_fail, &cur_fail, 1))
216 printf("audit_record_write: free space "
217 "below size of audit queue, failing "
218 "stop\n");
219 audit_in_failure = 1;
220 } else if (audit_in_failure) {
221 /*
222 * Note: if we want to handle recovery, this is the
223 * spot to do it: unset audit_in_failure, and issue a
224 * wakeup on the cv.
225 */
226 }
227 }
228
229 error = vn_rdwr(UIO_WRITE, vp, data, len, (off_t)0, UIO_SYSSPACE,
230 IO_APPEND|IO_UNIT, cred, NULL, NULL, curthread);
231 if (error == ENOSPC)
232 goto fail_enospc;
233 else if (error)
234 goto fail;
235
236 /*
237 * Catch completion of a queue drain here; if we're draining and the
 238 * queue is now empty, fail stop.  Note that audit_fail_stop is implicitly
 239 * true here, since audit_in_failure can only be set if audit_fail_stop is
 240 * set.
241 *
242 * Note: if we handle recovery from audit_in_failure, then we need to
243 * make panic here conditional.
244 */
245 if (audit_in_failure) {
246 if (audit_q_len == 0 && audit_pre_q_len == 0) {
247 VOP_LOCK(vp, LK_EXCLUSIVE | LK_RETRY);
248 (void)VOP_FSYNC(vp, MNT_WAIT, curthread);
249 VOP_UNLOCK(vp, 0);
250 panic("Audit store overflow; record queue drained.");
251 }
252 }
253
254 VFS_UNLOCK_GIANT(vfslocked);
255 return;
256
257fail_enospc:
258 /*
259 * ENOSPC is considered a special case with respect to failures, as
260 * this can reflect either our preemptive detection of insufficient
261 * space, or ENOSPC returned by the vnode write call.
262 */
263 if (audit_fail_stop) {
264 VOP_LOCK(vp, LK_EXCLUSIVE | LK_RETRY);
265 (void)VOP_FSYNC(vp, MNT_WAIT, curthread);
266 VOP_UNLOCK(vp, 0);
267 panic("Audit log space exhausted and fail-stop set.");
268 }
269 (void)audit_send_trigger(AUDIT_TRIGGER_NO_SPACE);
270 audit_suspended = 1;
271
272 /* FALLTHROUGH */
273fail:
274 /*
275 * We have failed to write to the file, so the current record is
276 * lost, which may require an immediate system halt.
277 */
278 if (audit_panic_on_write_fail) {
279 VOP_LOCK(vp, LK_EXCLUSIVE | LK_RETRY);
280 (void)VOP_FSYNC(vp, MNT_WAIT, curthread);
281 VOP_UNLOCK(vp, 0);
282 panic("audit_worker: write error %d\n", error);
283 } else if (ppsratecheck(&last_fail, &cur_fail, 1))
284 printf("audit_worker: write error %d\n", error);
285 VFS_UNLOCK_GIANT(vfslocked);
286}
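
The two space checks above are easy to misread because they are done in integer block arithmetic. The stand-alone sketch below is illustrative only and not part of this file; the MAX_AUDIT_RECORD_SIZE value and the statfs-style numbers are made up. It reproduces the same arithmetic used for the soft-limit threshold and the fail-stop estimate:

/*
 * Illustrative sketch: the soft-limit and fail-stop arithmetic from
 * audit_record_write(), with hypothetical file system numbers.
 * Build with: cc -o space_check space_check.c
 */
#include <stdio.h>

#define MAX_AUDIT_RECORD_SIZE	32767	/* assumed value, for illustration */

int
main(void)
{
	unsigned long f_blocks = 1000000;	/* total blocks (hypothetical) */
	unsigned long f_bfree = 180000;		/* free blocks (hypothetical) */
	unsigned long f_bsize = 4096;		/* block size (hypothetical) */
	int aq_minfree = 20;			/* soft limit: 20% free */
	int audit_q_len = 100, audit_pre_q_len = 20;
	unsigned long temp, need;

	/*
	 * Soft limit: f_blocks / (100 / aq_minfree).  Integer division, so
	 * for percentages that do not divide 100 evenly the threshold is
	 * only approximate (e.g. 3% becomes f_blocks / 33).
	 */
	temp = f_blocks / (100 / aq_minfree);
	printf("low-space trigger below %lu free blocks: %s\n", temp,
	    f_bfree < temp ? "fires" : "quiet");

	/*
	 * Fail stop: worst-case queue footprint in blocks, compared with
	 * the free block count, as in audit_record_write().
	 */
	need = (unsigned long)((audit_q_len + audit_pre_q_len + 1) *
	    MAX_AUDIT_RECORD_SIZE) / f_bsize;
	printf("queue may need %lu blocks of %lu free: %s\n", need, f_bfree,
	    need >= f_bfree ? "fail stop" : "ok");
	return (0);
}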
287
288/*
289 * Given a kernel audit record, process as required. Kernel audit records
 290 * are converted to one, or possibly two, BSM records, depending on whether
 291 * a user audit record is also present.  Kernel records need to be
 292 * converted to BSM before they can be written out.  Both types will be
 293 * written to disk and to audit pipes.
294 */
295static void
296audit_worker_process_record(struct kaudit_record *ar)
297{
298 struct au_record *bsm;
299 au_class_t class;
300 au_event_t event;
301 au_id_t auid;
302 int error, sorf;
303 int trail_locked;
304
305 /*
306 * We hold the audit_worker_sx lock over both writes, if there are
307 * two, so that the two records won't be split across a rotation and
308 * end up in two different trail files.
309 */
310 if (((ar->k_ar_commit & AR_COMMIT_USER) &&
311 (ar->k_ar_commit & AR_PRESELECT_USER_TRAIL)) ||
312 (ar->k_ar_commit & AR_PRESELECT_TRAIL)) {
313 sx_xlock(&audit_worker_sx);
314 trail_locked = 1;
315 } else
316 trail_locked = 0;
317
318 /*
319 * First, handle the user record, if any: commit to the system trail
320 * and audit pipes as selected.
321 */
322 if ((ar->k_ar_commit & AR_COMMIT_USER) &&
323 (ar->k_ar_commit & AR_PRESELECT_USER_TRAIL)) {
324 sx_assert(&audit_worker_sx, SA_XLOCKED);
325 audit_record_write(audit_vp, audit_cred, ar->k_udata,
326 ar->k_ulen);
327 }
328
329 if ((ar->k_ar_commit & AR_COMMIT_USER) &&
330 (ar->k_ar_commit & AR_PRESELECT_USER_PIPE))
331 audit_pipe_submit_user(ar->k_udata, ar->k_ulen);
332
333 if (!(ar->k_ar_commit & AR_COMMIT_KERNEL) ||
334 ((ar->k_ar_commit & AR_PRESELECT_PIPE) == 0 &&
335 (ar->k_ar_commit & AR_PRESELECT_TRAIL) == 0))
336 goto out;
337
338 auid = ar->k_ar.ar_subj_auid;
339 event = ar->k_ar.ar_event;
340 class = au_event_class(event);
341 if (ar->k_ar.ar_errno == 0)
342 sorf = AU_PRS_SUCCESS;
343 else
344 sorf = AU_PRS_FAILURE;
345
346 error = kaudit_to_bsm(ar, &bsm);
347 switch (error) {
348 case BSM_NOAUDIT:
349 goto out;
350
351 case BSM_FAILURE:
352 printf("audit_worker_process_record: BSM_FAILURE\n");
353 goto out;
354
355 case BSM_SUCCESS:
356 break;
357
358 default:
359 panic("kaudit_to_bsm returned %d", error);
360 }
361
362 if (ar->k_ar_commit & AR_PRESELECT_TRAIL) {
363 sx_assert(&audit_worker_sx, SA_XLOCKED);
364 audit_record_write(audit_vp, audit_cred, bsm->data, bsm->len);
365 }
366
367 if (ar->k_ar_commit & AR_PRESELECT_PIPE)
368 audit_pipe_submit(auid, event, class, sorf,
369 ar->k_ar_commit & AR_PRESELECT_TRAIL, bsm->data,
370 bsm->len);
371
372 kau_free(bsm);
373out:
374 if (trail_locked)
375 sx_xunlock(&audit_worker_sx);
376}
377
378/*
379 * The audit_worker thread is responsible for watching the event queue,
380 * dequeueing records, converting them to BSM format, and committing them to
381 * disk. In order to minimize lock thrashing, records are dequeued in sets
382 * to a thread-local work queue.
383 *
 384 * Note: this means that the effective bound on the size of the pending
 385 * record queue is 2x the length of the global queue.
386 */
387static void
388audit_worker(void *arg)
389{
390 struct kaudit_queue ar_worklist;
391 struct kaudit_record *ar;
392 int lowater_signal;
393
394 TAILQ_INIT(&ar_worklist);
395 mtx_lock(&audit_mtx);
396 while (1) {
397 mtx_assert(&audit_mtx, MA_OWNED);
398
399 /*
400 * Wait for a record.
401 */
402 while (TAILQ_EMPTY(&audit_q))
403 cv_wait(&audit_worker_cv, &audit_mtx);
404
405 /*
406 * If there are records in the global audit record queue,
407 * transfer them to a thread-local queue and process them
408 * one by one. If we cross the low watermark threshold,
409 * signal any waiting processes that they may wake up and
410 * continue generating records.
411 */
412 lowater_signal = 0;
413 while ((ar = TAILQ_FIRST(&audit_q))) {
414 TAILQ_REMOVE(&audit_q, ar, k_q);
415 audit_q_len--;
416 if (audit_q_len == audit_qctrl.aq_lowater)
417 lowater_signal++;
418 TAILQ_INSERT_TAIL(&ar_worklist, ar, k_q);
419 }
420 if (lowater_signal)
421 cv_broadcast(&audit_watermark_cv);
422
423 mtx_unlock(&audit_mtx);
424 while ((ar = TAILQ_FIRST(&ar_worklist))) {
425 TAILQ_REMOVE(&ar_worklist, ar, k_q);
426 audit_worker_process_record(ar);
427 audit_free(ar);
428 }
429 mtx_lock(&audit_mtx);
430 }
431}
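
The lock discipline in audit_worker() is the interesting part: the whole global queue is moved to a thread-local list while audit_mtx is held, and the slow per-record work (BSM conversion and disk writes) happens only after the mutex is dropped. A rough user-space analogue of that shape, using pthreads and sys/queue.h instead of the kernel primitives (the names below are hypothetical), might look like this:

/*
 * Illustrative sketch, not kernel code: drain a shared queue into a
 * local list under the mutex, then process the batch with the mutex
 * released, as audit_worker() does with audit_mtx and ar_worklist.
 * Build with: cc -o drain drain.c -lpthread
 */
#include <sys/queue.h>
#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

struct record {
	int			seq;
	TAILQ_ENTRY(record)	link;
};
TAILQ_HEAD(recq, record);

static struct recq shared_q = TAILQ_HEAD_INITIALIZER(shared_q);
static pthread_mutex_t q_mtx = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t q_cv = PTHREAD_COND_INITIALIZER;
static int done;

static void *
worker(void *arg)
{
	struct recq worklist = TAILQ_HEAD_INITIALIZER(worklist);
	struct record *r;

	(void)arg;
	pthread_mutex_lock(&q_mtx);
	for (;;) {
		while (TAILQ_EMPTY(&shared_q) && !done)
			pthread_cond_wait(&q_cv, &q_mtx);
		if (TAILQ_EMPTY(&shared_q) && done)
			break;
		/* Move everything to the local list while locked. */
		while ((r = TAILQ_FIRST(&shared_q)) != NULL) {
			TAILQ_REMOVE(&shared_q, r, link);
			TAILQ_INSERT_TAIL(&worklist, r, link);
		}
		pthread_mutex_unlock(&q_mtx);
		/* Slow per-record work happens with the mutex dropped. */
		while ((r = TAILQ_FIRST(&worklist)) != NULL) {
			TAILQ_REMOVE(&worklist, r, link);
			printf("processed record %d\n", r->seq);
			free(r);
		}
		pthread_mutex_lock(&q_mtx);
	}
	pthread_mutex_unlock(&q_mtx);
	return (NULL);
}

int
main(void)
{
	pthread_t tid;
	struct record *r;
	int i;

	pthread_create(&tid, NULL, worker, NULL);
	for (i = 0; i < 10; i++) {
		r = malloc(sizeof(*r));
		r->seq = i;
		pthread_mutex_lock(&q_mtx);
		TAILQ_INSERT_TAIL(&shared_q, r, link);
		pthread_cond_signal(&q_cv);
		pthread_mutex_unlock(&q_mtx);
	}
	pthread_mutex_lock(&q_mtx);
	done = 1;
	pthread_cond_broadcast(&q_cv);
	pthread_mutex_unlock(&q_mtx);
	pthread_join(tid, NULL);
	return (0);
}

Batching this way means producers contend with the worker only for the brief dequeue step, never for the duration of a disk write.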
432
433/*
434 * audit_rotate_vnode() is called by a user or kernel thread to configure or
435 * de-configure auditing on a vnode. The arguments are the replacement
436 * credential (referenced) and vnode (referenced and opened) to substitute
437 * for the current credential and vnode, if any. If either is set to NULL,
438 * both should be NULL, and this is used to indicate that audit is being
439 * disabled. Any previous cred/vnode will be closed and freed. We re-enable
440 * generating rotation requests to auditd.
441 */
442void
443audit_rotate_vnode(struct ucred *cred, struct vnode *vp)
444{
445 struct ucred *old_audit_cred;
446 struct vnode *old_audit_vp;
447 int vfslocked;
448
449 KASSERT((cred != NULL && vp != NULL) || (cred == NULL && vp == NULL),
450 ("audit_rotate_vnode: cred %p vp %p", cred, vp));
451
452 /*
453 * Rotate the vnode/cred, and clear the rotate flag so that we will
454 * send a rotate trigger if the new file fills.
455 */
456 sx_xlock(&audit_worker_sx);
457 old_audit_cred = audit_cred;
458 old_audit_vp = audit_vp;
459 audit_cred = cred;
460 audit_vp = vp;
461 audit_file_rotate_wait = 0;
462 audit_enabled = (audit_vp != NULL);
463 sx_xunlock(&audit_worker_sx);
464
465 /*
466 * If there was an old vnode/credential, close and free.
467 */
468 if (old_audit_vp != NULL) {
469 vfslocked = VFS_LOCK_GIANT(old_audit_vp->v_mount);
470 vn_close(old_audit_vp, AUDIT_CLOSE_FLAGS, old_audit_cred,
471 curthread);
472 VFS_UNLOCK_GIANT(vfslocked);
473 crfree(old_audit_cred);
474 }
475}
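
audit_rotate_vnode() does only the pointer swap while holding audit_worker_sx and closes the old vnode after the lock is dropped, so a slow close never stalls record writes. A small user-space analogue of that swap-under-lock, release-outside-lock shape (hypothetical names, a pthread mutex instead of an sx lock):

/*
 * Illustrative sketch, not kernel code: swap a shared resource while
 * holding the lock, but tear the old one down only after unlocking,
 * mirroring the shape of audit_rotate_vnode().
 * Build with: cc -o rotate rotate.c -lpthread
 */
#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

struct sink {
	FILE *fp;			/* stands in for the audit vnode */
};

static pthread_mutex_t sink_lock = PTHREAD_MUTEX_INITIALIZER;
static struct sink *cur_sink;		/* NULL means "disabled" */

static void
rotate_sink(struct sink *new_sink)
{
	struct sink *old_sink;

	/* Quick swap under the lock. */
	pthread_mutex_lock(&sink_lock);
	old_sink = cur_sink;
	cur_sink = new_sink;
	pthread_mutex_unlock(&sink_lock);

	/* Potentially slow teardown happens outside the lock. */
	if (old_sink != NULL) {
		fclose(old_sink->fp);
		free(old_sink);
	}
}

int
main(void)
{
	struct sink *s = malloc(sizeof(*s));

	s->fp = fopen("/dev/null", "w");
	if (s->fp == NULL)
		return (1);
	rotate_sink(s);			/* install a new sink */
	rotate_sink(NULL);		/* disable: closes and frees the old one */
	return (0);
}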
476
477void
478audit_worker_init(void)
479{
480 int error;
481
482 sx_init(&audit_worker_sx, "audit_worker_sx");
483 error = kproc_create(audit_worker, NULL, &audit_thread, RFHIGHPID,
484 0, "audit");
485 if (error)
486 panic("audit_worker_init: kproc_create returned %d", error);
487}