28c28
< * $FreeBSD: head/sys/security/audit/audit_pipe.c 156884 2006-03-19 15:39:03Z rwatson $
---
> * $FreeBSD: head/sys/security/audit/audit_pipe.c 159269 2006-06-05 14:48:17Z rwatson $
58c58,59
< * should be very careful to avoid introducing event cycles.
---
> * should be very careful to avoid introducing event cycles. Consumers may
> * express interest via a set of preselection ioctls.
66a68,69
> static MALLOC_DEFINE(M_AUDIT_PIPE_PRESELECT, "audit_pipe_preselect",
> "Audit pipe preselection structure");
84a88,104
> * Audit pipes allow processes to express "interest" in the set of records
> * that are delivered via the pipe. They do this in a similar manner to the
> * mechanism for audit trail configuration, by expressing two global masks,
> * and optionally expressing per-auid masks. The following data structure is
> * the per-auid mask description. The global state is stored in the audit
> * pipe data structure.
> *
> * We may want to consider a more space/time-efficient data structure once
> * usage patterns for per-auid specifications are clear.
> */
> struct audit_pipe_preselect {
> au_id_t app_auid;
> au_mask_t app_mask;
> TAILQ_ENTRY(audit_pipe_preselect) app_list;
> };
>
> /*
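
For illustration, here is a minimal userland sketch of the consumer side of the interest model described above, using the preselection ioctls added later in this change. The ioctl names, the preselection modes, and struct auditpipe_ioctl_preselect come from this diff; the header install path, the example auid value, and the all-classes mask value are assumptions rather than part of the change.

#include <sys/ioctl.h>
#include <bsm/audit.h>
#include <security/audit/audit_ioctl.h>	/* assumed install path */
#include <err.h>
#include <fcntl.h>
#include <unistd.h>

/*
 * Hypothetical consumer: switch a pipe to local preselection, set the
 * global attributable/non-attributable masks, and add one per-auid mask.
 */
int
main(void)
{
	struct auditpipe_ioctl_preselect aip;
	au_mask_t mask;
	int fd, mode;

	fd = open("/dev/auditpipe", O_RDONLY);
	if (fd < 0)
		err(1, "open(/dev/auditpipe)");

	/* Use pipe-local preselection rather than the trail's configuration. */
	mode = AUDITPIPE_PRESELECT_MODE_LOCAL;
	if (ioctl(fd, AUDITPIPE_SET_PRESELECT_MODE, &mode) < 0)
		err(1, "AUDITPIPE_SET_PRESELECT_MODE");

	/* Masks used for processes without a per-auid entry. */
	mask.am_success = mask.am_failure = 0xffffffff;	/* all classes (assumed) */
	if (ioctl(fd, AUDITPIPE_SET_PRESELECT_FLAGS, &mask) < 0)
		err(1, "AUDITPIPE_SET_PRESELECT_FLAGS");
	if (ioctl(fd, AUDITPIPE_SET_PRESELECT_NAFLAGS, &mask) < 0)
		err(1, "AUDITPIPE_SET_PRESELECT_NAFLAGS");

	/* Per-auid override; 1001 is a hypothetical audit ID. */
	aip.aip_auid = 1001;
	aip.aip_mask = mask;
	if (ioctl(fd, AUDITPIPE_SET_PRESELECT_AUID, &aip) < 0)
		err(1, "AUDITPIPE_SET_PRESELECT_AUID");

	close(fd);
	return (0);
}
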
104a125,137
> /*
> * Fields relating to pipe interest: global masks for unmatched
> * processes (attributable, non-attributable), and a list of specific
> * interest specifications by auid.
> */
> int ap_preselect_mode;
> au_mask_t ap_preselect_flags;
> au_mask_t ap_preselect_naflags;
> TAILQ_HEAD(, audit_pipe_preselect) ap_preselect_list;
>
> /*
> * Current pending record list.
> */
106a140,142
> /*
> * Global pipe list.
> */
111c147
< * Global list of audit pipes, mutex to protect it and the pipes. Finder
---
> * Global list of audit pipes, mutex to protect it and the pipes. Finer
118,119c154,156
< * This CV is used to wakeup on an audit record write. Eventually, it should
< * probably be per-pipe.
---
> * This CV is used to wakeup on an audit record write. Eventually, it might
> * be per-pipe to avoid unnecessary wakeups when several pipes with different
> * preselection masks are present.
141c178
< .d_flags = D_PSEUDO,
---
> .d_flags = D_PSEUDO | D_NEEDGIANT,
170c207,385
< * Apparent individual record to a queue -- allocate queue-local buffer, and
---
> * Find an audit pipe preselection specification for an auid, if any.
> */
> static struct audit_pipe_preselect *
> audit_pipe_preselect_find(struct audit_pipe *ap, au_id_t auid)
> {
> struct audit_pipe_preselect *app;
>
> mtx_assert(&audit_pipe_mtx, MA_OWNED);
>
> TAILQ_FOREACH(app, &ap->ap_preselect_list, app_list) {
> if (app->app_auid == auid)
> return (app);
> }
> return (NULL);
> }
>
> /*
> * Query the per-pipe mask for a specific auid.
> */
> static int
> audit_pipe_preselect_get(struct audit_pipe *ap, au_id_t auid,
> au_mask_t *maskp)
> {
> struct audit_pipe_preselect *app;
> int error;
>
> mtx_lock(&audit_pipe_mtx);
> app = audit_pipe_preselect_find(ap, auid);
> if (app != NULL) {
> *maskp = app->app_mask;
> error = 0;
> } else
> error = ENOENT;
> mtx_unlock(&audit_pipe_mtx);
> return (error);
> }
>
> /*
> * Set the per-pipe mask for a specific auid. Add a new entry if needed;
> * otherwise, update the current entry.
> */
> static void
> audit_pipe_preselect_set(struct audit_pipe *ap, au_id_t auid, au_mask_t mask)
> {
> struct audit_pipe_preselect *app, *app_new;
>
> /*
> * Pessimistically assume that the auid doesn't already have a mask
> * set, and allocate. We will free it if it is unneeded.
> */
> app_new = malloc(sizeof(*app_new), M_AUDIT_PIPE_PRESELECT, M_WAITOK);
> mtx_lock(&audit_pipe_mtx);
> app = audit_pipe_preselect_find(ap, auid);
> if (app == NULL) {
> app = app_new;
> app_new = NULL;
> app->app_auid = auid;
> TAILQ_INSERT_TAIL(&ap->ap_preselect_list, app, app_list);
> }
> app->app_mask = mask;
> mtx_unlock(&audit_pipe_mtx);
> if (app_new != NULL)
> free(app_new, M_AUDIT_PIPE_PRESELECT);
> }
>
> /*
> * Delete a per-auid mask on an audit pipe.
> */
> static int
> audit_pipe_preselect_delete(struct audit_pipe *ap, au_id_t auid)
> {
> struct audit_pipe_preselect *app;
> int error;
>
> mtx_lock(&audit_pipe_mtx);
> app = audit_pipe_preselect_find(ap, auid);
> if (app != NULL) {
> TAILQ_REMOVE(&ap->ap_preselect_list, app, app_list);
> error = 0;
> } else
> error = ENOENT;
> mtx_unlock(&audit_pipe_mtx);
> if (app != NULL)
> free(app, M_AUDIT_PIPE_PRESELECT);
> return (error);
> }
>
> /*
> * Delete all per-auid masks on an audit pipe.
> */
> static void
> audit_pipe_preselect_flush_locked(struct audit_pipe *ap)
> {
> struct audit_pipe_preselect *app;
>
> mtx_assert(&audit_pipe_mtx, MA_OWNED);
>
> while ((app = TAILQ_FIRST(&ap->ap_preselect_list)) != NULL) {
> TAILQ_REMOVE(&ap->ap_preselect_list, app, app_list);
> free(app, M_AUDIT_PIPE_PRESELECT);
> }
> }
>
> static void
> audit_pipe_preselect_flush(struct audit_pipe *ap)
> {
>
> mtx_lock(&audit_pipe_mtx);
> audit_pipe_preselect_flush_locked(ap);
> mtx_unlock(&audit_pipe_mtx);
> }
>
> /*
> * Determine whether a specific audit pipe matches a record with these
> * properties. Algorithm is as follows:
> *
> * - If the pipe is configured to track the default trail configuration, then
> * use the results of global preselection matching.
> * - If not, search for a specifically configured auid entry matching the
> * event. If an entry is found, use that.
> * - Otherwise, use the default flags or naflags configured for the pipe.
> */
> static int
> audit_pipe_preselect_check(struct audit_pipe *ap, au_id_t auid,
> au_event_t event, au_class_t class, int sorf, int trail_preselect)
> {
> struct audit_pipe_preselect *app;
>
> mtx_assert(&audit_pipe_mtx, MA_OWNED);
>
> switch (ap->ap_preselect_mode) {
> case AUDITPIPE_PRESELECT_MODE_TRAIL:
> return (trail_preselect);
>
> case AUDITPIPE_PRESELECT_MODE_LOCAL:
> app = audit_pipe_preselect_find(ap, auid);
> if (app == NULL) {
> if (auid == AU_DEFAUDITID)
> return (au_preselect(event, class,
> &ap->ap_preselect_naflags, sorf));
> else
> return (au_preselect(event, class,
> &ap->ap_preselect_flags, sorf));
> } else
> return (au_preselect(event, class, &app->app_mask,
> sorf));
>
> default:
> panic("audit_pipe_preselect_check: mode %d",
> ap->ap_preselect_mode);
> }
>
> return (0);
> }
>
> /*
> * Determine whether there exists a pipe interested in a record with specific
> * properties.
> */
> int
> audit_pipe_preselect(au_id_t auid, au_event_t event, au_class_t class,
> int sorf, int trail_preselect)
> {
> struct audit_pipe *ap;
>
> mtx_lock(&audit_pipe_mtx);
> TAILQ_FOREACH(ap, &audit_pipe_list, ap_list) {
> if (audit_pipe_preselect_check(ap, auid, event, class, sorf,
> trail_preselect)) {
> mtx_unlock(&audit_pipe_mtx);
> return (1);
> }
> }
> mtx_unlock(&audit_pipe_mtx);
> return (0);
> }
>
> /*
> * Append individual record to a queue -- allocate queue-local buffer, and
222c437,438
< audit_pipe_submit(void *record, u_int record_len)
---
> audit_pipe_submit(au_id_t auid, au_event_t event, au_class_t class, int sorf,
> int trail_select, void *record, u_int record_len)
232a449,478
> TAILQ_FOREACH(ap, &audit_pipe_list, ap_list) {
> if (audit_pipe_preselect_check(ap, auid, event, class, sorf,
> trail_select))
> audit_pipe_append(ap, record, record_len);
> }
> audit_pipe_records++;
> mtx_unlock(&audit_pipe_mtx);
> cv_signal(&audit_pipe_cv);
> }
>
> /*
> * audit_pipe_submit_user(): the same as audit_pipe_submit(), except that,
> * since we don't currently have preselection information available, the
> * record is delivered to the pipe unconditionally.
> *
> * XXXRW: This is a bug. The BSM check routine for submitting a user record
> * should parse that information and return it.
> */
> void
> audit_pipe_submit_user(void *record, u_int record_len)
> {
> struct audit_pipe *ap;
>
> /*
> * Lockless read to avoid mutex overhead if pipes are not in use.
> */
> if (TAILQ_FIRST(&audit_pipe_list) == NULL)
> return;
>
> mtx_lock(&audit_pipe_mtx);
239a486
>
241c488
< * Read the next record off of an audit pipe.
---
> * Pop the next record off of an audit pipe.
275a523,535
>
> /*
> * Default flags, naflags, and auid-specific preselection settings to
> * 0. Initialize the mode to the global trail so that if praudit(1)
> * is run on /dev/auditpipe, it sees events associated with the
> * default trail. Pipe-aware applications can change the mode, set
> * custom masks, and flush the pipe as needed.
> */
> bzero(&ap->ap_preselect_flags, sizeof(ap->ap_preselect_flags));
> bzero(&ap->ap_preselect_naflags, sizeof(ap->ap_preselect_naflags));
> TAILQ_INIT(&ap->ap_preselect_list);
> ap->ap_preselect_mode = AUDITPIPE_PRESELECT_MODE_TRAIL;
>
278a539
>
283,284c544
< * Free an audit pipe. Assumes mutex is held, audit_pipe is still on the
< * global list. Frees any audit pipe entries in the queue.
---
> * Flush all records currently present in an audit pipe; assume mutex is held.
287c547
< audit_pipe_free(struct audit_pipe *ap)
---
> audit_pipe_flush(struct audit_pipe *ap)
293d552
< TAILQ_REMOVE(&audit_pipe_list, ap, ap_list);
299a559,575
> }
>
> /*
> * Free an audit pipe; this means freeing all preselection state and all
> * records in the pipe. Assumes mutex is held to prevent any new records
> * from being inserted during the free, and that the audit pipe is still on
> * the global list.
> */
> static void
> audit_pipe_free(struct audit_pipe *ap)
> {
>
> mtx_assert(&audit_pipe_mtx, MA_OWNED);
>
> audit_pipe_preselect_flush_locked(ap);
> audit_pipe_flush(ap);
> TAILQ_REMOVE(&audit_pipe_list, ap, ap_list);
393a670
> struct auditpipe_ioctl_preselect *aip;
395c672,674
< int error;
---
> au_mask_t *maskp;
> int error, mode;
> au_id_t auid;
398a678,683
>
> /*
> * Audit pipe ioctls: first come standard device node ioctls, then
> * manipulation of pipe settings, and finally, statistics query
> * ioctls.
> */
469a755,838
> case AUDITPIPE_GET_PRESELECT_FLAGS:
> mtx_lock(&audit_pipe_mtx);
> maskp = (au_mask_t *)data;
> *maskp = ap->ap_preselect_flags;
> mtx_unlock(&audit_pipe_mtx);
> error = 0;
> break;
>
> case AUDITPIPE_SET_PRESELECT_FLAGS:
> mtx_lock(&audit_pipe_mtx);
> maskp = (au_mask_t *)data;
> ap->ap_preselect_flags = *maskp;
> mtx_unlock(&audit_pipe_mtx);
> error = 0;
> break;
>
> case AUDITPIPE_GET_PRESELECT_NAFLAGS:
> mtx_lock(&audit_pipe_mtx);
> maskp = (au_mask_t *)data;
> *maskp = ap->ap_preselect_naflags;
> mtx_unlock(&audit_pipe_mtx);
> error = 0;
> break;
>
> case AUDITPIPE_SET_PRESELECT_NAFLAGS:
> mtx_lock(&audit_pipe_mtx);
> maskp = (au_mask_t *)data;
> ap->ap_preselect_naflags = *maskp;
> mtx_unlock(&audit_pipe_mtx);
> error = 0;
> break;
>
> case AUDITPIPE_GET_PRESELECT_AUID:
> aip = (struct auditpipe_ioctl_preselect *)data;
> error = audit_pipe_preselect_get(ap, aip->aip_auid,
> &aip->aip_mask);
> break;
>
> case AUDITPIPE_SET_PRESELECT_AUID:
> aip = (struct auditpipe_ioctl_preselect *)data;
> audit_pipe_preselect_set(ap, aip->aip_auid, aip->aip_mask);
> error = 0;
> break;
>
> case AUDITPIPE_DELETE_PRESELECT_AUID:
> auid = *(au_id_t *)data;
> error = audit_pipe_preselect_delete(ap, auid);
> break;
>
> case AUDITPIPE_FLUSH_PRESELECT_AUID:
> audit_pipe_preselect_flush(ap);
> error = 0;
> break;
>
> case AUDITPIPE_GET_PRESELECT_MODE:
> mtx_lock(&audit_pipe_mtx);
> *(int *)data = ap->ap_preselect_mode;
> mtx_unlock(&audit_pipe_mtx);
> error = 0;
> break;
>
> case AUDITPIPE_SET_PRESELECT_MODE:
> mode = *(int *)data;
> switch (mode) {
> case AUDITPIPE_PRESELECT_MODE_TRAIL:
> case AUDITPIPE_PRESELECT_MODE_LOCAL:
> mtx_lock(&audit_pipe_mtx);
> ap->ap_preselect_mode = mode;
> mtx_unlock(&audit_pipe_mtx);
> error = 0;
> break;
>
> default:
> error = EINVAL;
> }
> break;
>
> case AUDITPIPE_FLUSH:
> mtx_lock(&audit_pipe_mtx);
> audit_pipe_flush(ap);
> mtx_unlock(&audit_pipe_mtx);
> error = 0;
> break;
>
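
To round out the ioctl surface above, a hedged sketch of the query/teardown side, continuing the earlier example: per the handlers in this hunk, AUDITPIPE_GET_PRESELECT_AUID and AUDITPIPE_DELETE_PRESELECT_AUID fail with ENOENT when no per-auid entry exists. The helper name is illustrative, and AUDITPIPE_FLUSH is assumed to take no argument.

#include <sys/ioctl.h>
#include <bsm/audit.h>
#include <security/audit/audit_ioctl.h>	/* assumed install path */
#include <stdio.h>

/*
 * Hypothetical helper: read back the per-auid mask set earlier, then tear
 * down the pipe-local configuration and drop any queued records.
 */
static void
auditpipe_show_and_clear(int fd, au_id_t auid)
{
	struct auditpipe_ioctl_preselect aip;

	aip.aip_auid = auid;
	if (ioctl(fd, AUDITPIPE_GET_PRESELECT_AUID, &aip) == 0)
		printf("auid %d: success 0x%x failure 0x%x\n",
		    (int)aip.aip_auid, aip.aip_mask.am_success,
		    aip.aip_mask.am_failure);

	/* ENOENT here simply means no per-auid entry was present. */
	(void)ioctl(fd, AUDITPIPE_DELETE_PRESELECT_AUID, &auid);

	/* Discard records queued under the old configuration. */
	(void)ioctl(fd, AUDITPIPE_FLUSH);
}
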
498a868,878
> *
> * Providing more sophisticated behavior, such as partial reads, is tricky
> * due to the potential for parallel I/O. If partial read support is
> * required, it will require a per-pipe "current record being read" along
> * with an offset into that record which has already been read. Threads
> * performing partial reads will need to allocate per-thread copies of the
> * data so that if another thread completes the read of the record, it can be
> * freed without adding reference count logic. If this is added, a flag to
> * indicate that only atomic record reads are desired would be useful, since if
> * different threads are all waiting for records on the pipe, they will want
> * independent record reads, which is currently the behavior.
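
Under the whole-record read behavior the comment above describes, a minimal consumer loop might look like the sketch below. The buffer size is an assumption chosen to exceed any single record; decoding the BSM data is left to praudit(1)/libbsm.

#include <err.h>
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

/*
 * Hypothetical record pump: read whole BSM records from the pipe and report
 * their sizes.  A real consumer would hand each buffer to a BSM parser.
 */
int
main(void)
{
	char buf[65536];	/* assumed to exceed any single record */
	ssize_t len;
	int fd;

	fd = open("/dev/auditpipe", O_RDONLY);
	if (fd < 0)
		err(1, "open(/dev/auditpipe)");
	while ((len = read(fd, buf, sizeof(buf))) > 0)
		printf("received %zd-byte audit record\n", len);
	if (len < 0)
		err(1, "read");
	close(fd);
	return (0);
}
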