/*
 * Copyright (c) 2004 Apple Computer, Inc.
 * Copyright (c) 2005 Robert N. M. Watson
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1.  Redistributions of source code must retain the above copyright
 *     notice, this list of conditions and the following disclaimer.
 * 2.  Redistributions in binary form must reproduce the above copyright
 *     notice, this list of conditions and the following disclaimer in the
 *     documentation and/or other materials provided with the distribution.
 * 3.  Neither the name of Apple Computer, Inc. ("Apple") nor the names of
 *     its contributors may be used to endorse or promote products derived
 *     from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY APPLE AND ITS CONTRIBUTORS "AS IS" AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.
IN NO EVENT SHALL APPLE OR ITS CONTRIBUTORS BE LIABLE FOR 22155131Srwatson * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 23155131Srwatson * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS 24155131Srwatson * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 25155131Srwatson * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, 26155131Srwatson * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING 27155131Srwatson * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE 28155131Srwatson * POSSIBILITY OF SUCH DAMAGE. 29155131Srwatson * 30156283Srwatson * $P4: //depot/projects/trustedbsd/openbsm/libbsm/bsm_mask.c#13 $ 31155131Srwatson */ 32155131Srwatson 33155131Srwatson#include <sys/types.h> 34156283Srwatson 35156283Srwatson#include <config/config.h> 36156283Srwatson#ifdef HAVE_FULL_QUEUE_H 37155131Srwatson#include <sys/queue.h> 38156283Srwatson#else /* !HAVE_FULL_QUEUE_H */ 39156283Srwatson#include <compat/queue.h> 40156283Srwatson#endif /* !HAVE_FULL_QUEUE_H */ 41155131Srwatson 42155131Srwatson#include <bsm/libbsm.h> 43155131Srwatson 44155131Srwatson#include <pthread.h> 45155131Srwatson#include <stdlib.h> 46155131Srwatson#include <string.h> 47155131Srwatson 48155131Srwatson/* MT-Safe */ 49155131Srwatsonstatic pthread_mutex_t mutex = PTHREAD_MUTEX_INITIALIZER; 50155131Srwatsonstatic int firsttime = 1; 51155131Srwatson 52155131Srwatson/* 53155131Srwatson * XXX ev_cache, once created, sticks around until the calling program exits. 54155131Srwatson * This may or may not be a problem as far as absolute memory usage goes, but 55155131Srwatson * at least there don't appear to be any leaks in using the cache. 56155131Srwatson * 57155131Srwatson * XXXRW: Note that despite (mutex), load_event_table() could race with 58155131Srwatson * other consumers of the getauevents() API. 
59155131Srwatson */ 60155131Srwatsonstruct audit_event_map { 61155131Srwatson char ev_name[AU_EVENT_NAME_MAX]; 62155131Srwatson char ev_desc[AU_EVENT_DESC_MAX]; 63155131Srwatson struct au_event_ent ev; 64155131Srwatson LIST_ENTRY(audit_event_map) ev_list; 65155131Srwatson}; 66155131Srwatsonstatic LIST_HEAD(, audit_event_map) ev_cache; 67155131Srwatson 68155131Srwatsonstatic struct audit_event_map * 69155131Srwatsonaudit_event_map_alloc(void) 70155131Srwatson{ 71155131Srwatson struct audit_event_map *aemp; 72155131Srwatson 73155131Srwatson aemp = malloc(sizeof(*aemp)); 74155131Srwatson if (aemp == NULL) 75155131Srwatson return (aemp); 76155131Srwatson bzero(aemp, sizeof(*aemp)); 77155131Srwatson aemp->ev.ae_name = aemp->ev_name; 78155131Srwatson aemp->ev.ae_desc = aemp->ev_desc; 79155131Srwatson return (aemp); 80155131Srwatson} 81155131Srwatson 82155131Srwatsonstatic void 83155131Srwatsonaudit_event_map_free(struct audit_event_map *aemp) 84155131Srwatson{ 85155131Srwatson 86155131Srwatson free(aemp); 87155131Srwatson} 88155131Srwatson 89155131Srwatson/* 90155131Srwatson * When reading into the cache fails, we need to flush the entire cache to 91155131Srwatson * prevent it from containing some but not all records. 92155131Srwatson */ 93155131Srwatsonstatic void 94155131Srwatsonflush_cache(void) 95155131Srwatson{ 96155131Srwatson struct audit_event_map *aemp; 97155131Srwatson 98155131Srwatson /* XXX: Would assert 'mutex'. */ 99155131Srwatson 100155131Srwatson while ((aemp = LIST_FIRST(&ev_cache)) != NULL) { 101155131Srwatson LIST_REMOVE(aemp, ev_list); 102155131Srwatson audit_event_map_free(aemp); 103155131Srwatson } 104155131Srwatson} 105155131Srwatson 106155131Srwatsonstatic int 107155131Srwatsonload_event_table(void) 108155131Srwatson{ 109155131Srwatson struct audit_event_map *aemp; 110155131Srwatson struct au_event_ent *ep; 111155131Srwatson 112155131Srwatson /* 113155131Srwatson * XXX: Would assert 'mutex'. 
114155131Srwatson * Loading of the cache happens only once; dont check if cache is 115155131Srwatson * already loaded. 116155131Srwatson */ 117155131Srwatson LIST_INIT(&ev_cache); 118155131Srwatson setauevent(); /* Rewind to beginning of entries. */ 119155131Srwatson do { 120155131Srwatson aemp = audit_event_map_alloc(); 121155131Srwatson if (aemp == NULL) { 122155131Srwatson flush_cache(); 123155131Srwatson return (-1); 124155131Srwatson } 125155131Srwatson ep = getauevent_r(&aemp->ev); 126155131Srwatson if (ep != NULL) 127155131Srwatson LIST_INSERT_HEAD(&ev_cache, aemp, ev_list); 128155131Srwatson else 129155131Srwatson audit_event_map_free(aemp); 130155131Srwatson } while (ep != NULL); 131155131Srwatson return (1); 132155131Srwatson} 133155131Srwatson 134155131Srwatson/* 135155131Srwatson * Read the event with the matching event number from the cache. 136155131Srwatson */ 137155131Srwatsonstatic struct au_event_ent * 138155131Srwatsonread_from_cache(au_event_t event) 139155131Srwatson{ 140155131Srwatson struct audit_event_map *elem; 141155131Srwatson 142155131Srwatson /* XXX: Would assert 'mutex'. */ 143155131Srwatson 144155131Srwatson LIST_FOREACH(elem, &ev_cache, ev_list) { 145155131Srwatson if (elem->ev.ae_number == event) 146155131Srwatson return (&elem->ev); 147155131Srwatson } 148155131Srwatson 149155131Srwatson return (NULL); 150155131Srwatson} 151155131Srwatson 152155131Srwatson/* 153155131Srwatson * Check if the audit event is preselected against the preselection mask. 
154155131Srwatson */ 155155131Srwatsonint 156155131Srwatsonau_preselect(au_event_t event, au_mask_t *mask_p, int sorf, int flag) 157155131Srwatson{ 158155131Srwatson struct au_event_ent *ev; 159155131Srwatson au_class_t effmask = 0; 160155131Srwatson 161155131Srwatson if (mask_p == NULL) 162155131Srwatson return (-1); 163155131Srwatson 164155131Srwatson 165155131Srwatson pthread_mutex_lock(&mutex); 166155131Srwatson if (firsttime) { 167155131Srwatson firsttime = 0; 168155131Srwatson if ( -1 == load_event_table()) { 169155131Srwatson pthread_mutex_unlock(&mutex); 170155131Srwatson return (-1); 171155131Srwatson } 172155131Srwatson } 173155131Srwatson switch (flag) { 174155131Srwatson case AU_PRS_REREAD: 175155131Srwatson flush_cache(); 176155131Srwatson if (load_event_table() == -1) { 177155131Srwatson pthread_mutex_unlock(&mutex); 178155131Srwatson return (-1); 179155131Srwatson } 180155131Srwatson ev = read_from_cache(event); 181155131Srwatson break; 182155131Srwatson case AU_PRS_USECACHE: 183155131Srwatson ev = read_from_cache(event); 184155131Srwatson break; 185155131Srwatson default: 186155131Srwatson ev = NULL; 187155131Srwatson } 188155131Srwatson if (ev == NULL) { 189155131Srwatson pthread_mutex_unlock(&mutex); 190155131Srwatson return (-1); 191155131Srwatson } 192155131Srwatson if (sorf & AU_PRS_SUCCESS) 193155131Srwatson effmask |= (mask_p->am_success & ev->ae_class); 194155131Srwatson if (sorf & AU_PRS_FAILURE) 195155131Srwatson effmask |= (mask_p->am_failure & ev->ae_class); 196155131Srwatson pthread_mutex_unlock(&mutex); 197155131Srwatson if (effmask != 0) 198155131Srwatson return (1); 199155131Srwatson return (0); 200155131Srwatson} 201