/*-
 * Copyright (c) 2004 Apple Inc.
 * Copyright (c) 2005 Robert N. M. Watson
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1.  Redistributions of source code must retain the above copyright
 *     notice, this list of conditions and the following disclaimer.
 * 2.  Redistributions in binary form must reproduce the above copyright
 *     notice, this list of conditions and the following disclaimer in the
 *     documentation and/or other materials provided with the distribution.
 * 3.  Neither the name of Apple Inc. ("Apple") nor the names of
 *     its contributors may be used to endorse or promote products derived
 *     from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY APPLE AND ITS CONTRIBUTORS "AS IS" AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL APPLE OR ITS CONTRIBUTORS BE LIABLE FOR
 * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
 * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
 * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 *
 * $P4: //depot/projects/trustedbsd/openbsm/libbsm/bsm_mask.c#15 $
 */

#include <sys/types.h>

#include <config/config.h>
#ifdef HAVE_FULL_QUEUE_H
#include <sys/queue.h>
#else /* !HAVE_FULL_QUEUE_H */
#include <compat/queue.h>
#endif /* !HAVE_FULL_QUEUE_H */

#include <bsm/libbsm.h>

#ifdef HAVE_PTHREAD_MUTEX_LOCK
#include <pthread.h>
#endif
#include <stdlib.h>
#include <string.h>

/* MT-Safe */
#ifdef HAVE_PTHREAD_MUTEX_LOCK
static pthread_mutex_t	mutex = PTHREAD_MUTEX_INITIALIZER;
#endif
static int		firsttime = 1;
/*
 * XXX ev_cache, once created, sticks around until the calling program exits.
 * This may or may not be a problem as far as absolute memory usage goes, but
 * at least there don't appear to be any leaks in using the cache.
 *
 * XXXRW: Note that despite (mutex), load_event_table() could race with
 * other consumers of the getauevents() API.
 */
struct audit_event_map {
	char				 ev_name[AU_EVENT_NAME_MAX];
	char				 ev_desc[AU_EVENT_DESC_MAX];
	struct au_event_ent		 ev;
	LIST_ENTRY(audit_event_map)	 ev_list;
};
static LIST_HEAD(, audit_event_map)	ev_cache;

static struct audit_event_map *
audit_event_map_alloc(void)
{
	struct audit_event_map *aemp;

	aemp = malloc(sizeof(*aemp));
	if (aemp == NULL)
		return (aemp);
	bzero(aemp, sizeof(*aemp));
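	/* Point the embedded au_event_ent strings at this entry's storage. */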
	aemp->ev.ae_name = aemp->ev_name;
	aemp->ev.ae_desc = aemp->ev_desc;
	return (aemp);
}

static void
audit_event_map_free(struct audit_event_map *aemp)
{

	free(aemp);
}

/*
 * When reading into the cache fails, we need to flush the entire cache to
 * prevent it from containing some but not all records.
 */
static void
flush_cache(void)
{
	struct audit_event_map *aemp;

	/* XXX: Would assert 'mutex'. */

	while ((aemp = LIST_FIRST(&ev_cache)) != NULL) {
		LIST_REMOVE(aemp, ev_list);
		audit_event_map_free(aemp);
	}
}

static int
load_event_table(void)
{
	struct audit_event_map *aemp;
	struct au_event_ent *ep;

	/*
	 * XXX: Would assert 'mutex'.
	 * Loading of the cache happens only once; don't check whether the
	 * cache is already loaded.
	 */
	LIST_INIT(&ev_cache);
	setauevent();	/* Rewind to beginning of entries. */
	do {
		aemp = audit_event_map_alloc();
		if (aemp == NULL) {
			flush_cache();
			return (-1);
		}
		ep = getauevent_r(&aemp->ev);
		if (ep != NULL)
			LIST_INSERT_HEAD(&ev_cache, aemp, ev_list);
		else
			audit_event_map_free(aemp);
	} while (ep != NULL);
	return (1);
}

/*
 * Read the event with the matching event number from the cache.
 */
static struct au_event_ent *
read_from_cache(au_event_t event)
{
	struct audit_event_map *elem;

	/* XXX: Would assert 'mutex'. */

	LIST_FOREACH(elem, &ev_cache, ev_list) {
		if (elem->ev.ae_number == event)
			return (&elem->ev);
	}

	return (NULL);
}

/*
 * Check if the audit event is preselected against the preselection mask.
 */
int
au_preselect(au_event_t event, au_mask_t *mask_p, int sorf, int flag)
{
	struct au_event_ent *ev;
	au_class_t effmask = 0;

	if (mask_p == NULL)
		return (-1);

#ifdef HAVE_PTHREAD_MUTEX_LOCK
	pthread_mutex_lock(&mutex);
#endif
	if (firsttime) {
		firsttime = 0;
		if (load_event_table() == -1) {
#ifdef HAVE_PTHREAD_MUTEX_LOCK
			pthread_mutex_unlock(&mutex);
#endif
			return (-1);
		}
	}
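	/*
	 * AU_PRS_REREAD forces a fresh read of the event table into the
	 * cache; AU_PRS_USECACHE reuses whatever is already cached.
	 */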
	switch (flag) {
	case AU_PRS_REREAD:
		flush_cache();
		if (load_event_table() == -1) {
#ifdef HAVE_PTHREAD_MUTEX_LOCK
			pthread_mutex_unlock(&mutex);
#endif
			return (-1);
		}
		ev = read_from_cache(event);
		break;
	case AU_PRS_USECACHE:
		ev = read_from_cache(event);
		break;
	default:
		ev = NULL;
	}
	if (ev == NULL) {
#ifdef HAVE_PTHREAD_MUTEX_LOCK
		pthread_mutex_unlock(&mutex);
#endif
		return (-1);
	}
	if (sorf & AU_PRS_SUCCESS)
		effmask |= (mask_p->am_success & ev->ae_class);
	if (sorf & AU_PRS_FAILURE)
		effmask |= (mask_p->am_failure & ev->ae_class);
#ifdef HAVE_PTHREAD_MUTEX_LOCK
	pthread_mutex_unlock(&mutex);
#endif
	if (effmask != 0)
		return (1);
	return (0);
}
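
/*
 * Example usage (a minimal sketch, not part of the library): a hypothetical
 * caller asking whether AUE_OPEN_R would be audited on success under a
 * process preselection mask.  The class bit assigned to am_success below is
 * an illustrative assumption; real masks would come from getauditflagsbin(3)
 * or the audit_control(5) configuration.
 *
 *	#include <bsm/libbsm.h>
 *	#include <bsm/audit_kevents.h>
 *	#include <stdio.h>
 *
 *	int
 *	main(void)
 *	{
 *		au_mask_t mask;
 *
 *		mask.am_success = 0x00000010;
 *		mask.am_failure = 0;
 *		switch (au_preselect(AUE_OPEN_R, &mask, AU_PRS_SUCCESS,
 *		    AU_PRS_USECACHE)) {
 *		case 1:
 *			printf("event would be audited\n");
 *			break;
 *		case 0:
 *			printf("event not selected\n");
 *			break;
 *		default:
 *			printf("error: unknown event or table load failure\n");
 *		}
 *		return (0);
 *	}
 */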