/*-
 * Copyright (c) 2004 Apple Inc.
 * Copyright (c) 2005 Robert N. M. Watson
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of Apple Inc. ("Apple") nor the names of
 *    its contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY APPLE AND ITS CONTRIBUTORS "AS IS" AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL APPLE OR ITS CONTRIBUTORS BE LIABLE FOR
 * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
 * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
 * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/types.h>

#include <config/config.h>
#ifdef HAVE_FULL_QUEUE_H
#include <sys/queue.h>
#else /* !HAVE_FULL_QUEUE_H */
#include <compat/queue.h>
#endif /* !HAVE_FULL_QUEUE_H */

#include <bsm/libbsm.h>

#ifdef HAVE_PTHREAD_MUTEX_LOCK
#include <pthread.h>
#endif
#include <stdlib.h>
#include <string.h>

/* MT-Safe */
#ifdef HAVE_PTHREAD_MUTEX_LOCK
static pthread_mutex_t mutex = PTHREAD_MUTEX_INITIALIZER;
#endif
static int firsttime = 1;

/*
 * XXX ev_cache, once created, sticks around until the calling program exits.
 * This may or may not be a problem as far as absolute memory usage goes, but
 * at least there don't appear to be any leaks in using the cache.
 *
 * XXXRW: Note that despite (mutex), load_event_table() could race with
 * other consumers of the getauevents() API.
 */
struct audit_event_map {
	char				 ev_name[AU_EVENT_NAME_MAX];
	char				 ev_desc[AU_EVENT_DESC_MAX];
	struct au_event_ent		 ev;
	LIST_ENTRY(audit_event_map)	 ev_list;
};
static LIST_HEAD(, audit_event_map)	ev_cache;

static struct audit_event_map *
audit_event_map_alloc(void)
{
	struct audit_event_map *aemp;

	aemp = malloc(sizeof(*aemp));
	if (aemp == NULL)
		return (aemp);
	bzero(aemp, sizeof(*aemp));
	aemp->ev.ae_name = aemp->ev_name;
	aemp->ev.ae_desc = aemp->ev_desc;
	return (aemp);
}

static void
audit_event_map_free(struct audit_event_map *aemp)
{

	free(aemp);
}

/*
 * When reading into the cache fails, we need to flush the entire cache to
 * prevent it from containing some but not all records.
 */
static void
flush_cache(void)
{
	struct audit_event_map *aemp;

	/* XXX: Would assert 'mutex'. */

	while ((aemp = LIST_FIRST(&ev_cache)) != NULL) {
		LIST_REMOVE(aemp, ev_list);
		audit_event_map_free(aemp);
	}
}

static int
load_event_table(void)
{
	struct audit_event_map *aemp;
	struct au_event_ent *ep;

	/*
	 * XXX: Would assert 'mutex'.
	 * Loading of the cache happens only once; don't check whether the
	 * cache is already loaded.
	 */
	LIST_INIT(&ev_cache);
	setauevent();	/* Rewind to the beginning of the entries. */
	do {
		aemp = audit_event_map_alloc();
		if (aemp == NULL) {
			flush_cache();
			return (-1);
		}
		ep = getauevent_r(&aemp->ev);
		if (ep != NULL)
			LIST_INSERT_HEAD(&ev_cache, aemp, ev_list);
		else
			audit_event_map_free(aemp);
	} while (ep != NULL);
	return (1);
}

/*
 * Read the event with the matching event number from the cache.
 */
static struct au_event_ent *
read_from_cache(au_event_t event)
{
	struct audit_event_map *elem;

	/* XXX: Would assert 'mutex'. */

	LIST_FOREACH(elem, &ev_cache, ev_list) {
		if (elem->ev.ae_number == event)
			return (&elem->ev);
	}

	return (NULL);
}

/*
 * Check if the audit event is preselected against the preselection mask.
 */
int
au_preselect(au_event_t event, au_mask_t *mask_p, int sorf, int flag)
{
	struct au_event_ent *ev;
	au_class_t effmask = 0;

	if (mask_p == NULL)
		return (-1);

#ifdef HAVE_PTHREAD_MUTEX_LOCK
	pthread_mutex_lock(&mutex);
#endif
	if (firsttime) {
		firsttime = 0;
		if (load_event_table() == -1) {
#ifdef HAVE_PTHREAD_MUTEX_LOCK
			pthread_mutex_unlock(&mutex);
#endif
			return (-1);
		}
	}
	switch (flag) {
	case AU_PRS_REREAD:
		flush_cache();
		if (load_event_table() == -1) {
#ifdef HAVE_PTHREAD_MUTEX_LOCK
			pthread_mutex_unlock(&mutex);
#endif
			return (-1);
		}
		ev = read_from_cache(event);
		break;
	case AU_PRS_USECACHE:
		ev = read_from_cache(event);
		break;
	default:
		ev = NULL;
	}
	if (ev == NULL) {
#ifdef HAVE_PTHREAD_MUTEX_LOCK
		pthread_mutex_unlock(&mutex);
#endif
		return (-1);
	}
	if (sorf & AU_PRS_SUCCESS)
		effmask |= (mask_p->am_success & ev->ae_class);
	if (sorf & AU_PRS_FAILURE)
		effmask |= (mask_p->am_failure & ev->ae_class);
#ifdef HAVE_PTHREAD_MUTEX_LOCK
	pthread_mutex_unlock(&mutex);
#endif
	if (effmask != 0)
		return (1);
	return (0);
}
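
/*
 * Illustrative usage sketch (not part of the library and not compiled): how
 * a consumer might ask whether an event would be preselected under a process
 * audit mask.  The "lo" flag string and the AUE_login event number are
 * example choices only, not anything this file requires.
 */
#if 0
#include <bsm/libbsm.h>
#include <bsm/audit_uevents.h>	/* Defines AUE_login and other user events. */
#include <stdio.h>

static void
example_preselect(void)
{
	au_mask_t mask;
	char flags[] = "lo";	/* Example: the login/logout audit class. */

	/* Build success/failure class masks from the textual flags. */
	if (getauditflagsbin(flags, &mask) != 0)
		return;

	/* Test the event against the mask for both success and failure. */
	switch (au_preselect(AUE_login, &mask, AU_PRS_BOTH, AU_PRS_USECACHE)) {
	case 1:
		printf("event would be audited\n");
		break;
	case 0:
		printf("event would not be audited\n");
		break;
	default:
		printf("event lookup failed\n");
		break;
	}
}
#endif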