/*-
 * Copyright (c) 2004 Apple Inc.
 * Copyright (c) 2005 Robert N. M. Watson
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1.  Redistributions of source code must retain the above copyright
 *     notice, this list of conditions and the following disclaimer.
 * 2.  Redistributions in binary form must reproduce the above copyright
 *     notice, this list of conditions and the following disclaimer in the
 *     documentation and/or other materials provided with the distribution.
 * 3.  Neither the name of Apple Inc. ("Apple") nor the names of
 *     its contributors may be used to endorse or promote products derived
 *     from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY APPLE AND ITS CONTRIBUTORS "AS IS" AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL APPLE OR ITS CONTRIBUTORS BE LIABLE FOR
 * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
 * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
 * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/types.h>

#include <config/config.h>
#ifdef HAVE_FULL_QUEUE_H
#include <sys/queue.h>
#else /* !HAVE_FULL_QUEUE_H */
#include <compat/queue.h>
#endif /* !HAVE_FULL_QUEUE_H */

#include <bsm/libbsm.h>

#ifdef HAVE_PTHREAD_MUTEX_LOCK
#include <pthread.h>
#endif
#include <stdlib.h>
#include <string.h>

/* MT-Safe */
#ifdef HAVE_PTHREAD_MUTEX_LOCK
static pthread_mutex_t	mutex = PTHREAD_MUTEX_INITIALIZER;
#endif
static int		firsttime = 1;

/*
 * XXX ev_cache, once created, sticks around until the calling program
 * exits.  This may or may not be a problem as far as absolute memory usage
 * goes, but at least there don't appear to be any leaks in using the cache.
 *
 * XXXRW: Note that despite the mutex, load_event_table() could race with
 * other consumers of the getauevent() API.
 */
struct audit_event_map {
	char				 ev_name[AU_EVENT_NAME_MAX];
	char				 ev_desc[AU_EVENT_DESC_MAX];
	struct au_event_ent		 ev;
	LIST_ENTRY(audit_event_map)	 ev_list;
};
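/*
 * In-memory cache of audit_event(5) entries, loaded on first use by
 * au_preselect().
 */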
static LIST_HEAD(, audit_event_map)	ev_cache;

static struct audit_event_map *
audit_event_map_alloc(void)
{
	struct audit_event_map *aemp;

	aemp = malloc(sizeof(*aemp));
	if (aemp == NULL)
		return (NULL);
	memset(aemp, 0, sizeof(*aemp));
	/* Point the entry's name/description fields at the backing buffers. */
	aemp->ev.ae_name = aemp->ev_name;
	aemp->ev.ae_desc = aemp->ev_desc;
	return (aemp);
}

static void
audit_event_map_free(struct audit_event_map *aemp)
{

	free(aemp);
}

/*
 * When reading into the cache fails, we need to flush the entire cache to
 * prevent it from containing some but not all records.
 */
static void
flush_cache(void)
{
	struct audit_event_map *aemp;

	/* XXX: Would assert 'mutex'. */

	while ((aemp = LIST_FIRST(&ev_cache)) != NULL) {
		LIST_REMOVE(aemp, ev_list);
		audit_event_map_free(aemp);
	}
}

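/*
 * Load the complete audit_event(5) table into ev_cache.  Returns 1 on
 * success and -1 on allocation failure, in which case the cache is flushed
 * so that it is never left partially populated.
 */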
static int
load_event_table(void)
{
	struct audit_event_map *aemp;
	struct au_event_ent *ep;

	/*
	 * XXX: Would assert 'mutex'.
	 * Loading of the cache happens only once; don't check whether the
	 * cache is already loaded.
	 */
	LIST_INIT(&ev_cache);
	setauevent();	/* Rewind to beginning of entries. */
	do {
		aemp = audit_event_map_alloc();
		if (aemp == NULL) {
			flush_cache();
			return (-1);
		}
		ep = getauevent_r(&aemp->ev);
		if (ep != NULL)
			LIST_INSERT_HEAD(&ev_cache, aemp, ev_list);
		else
			audit_event_map_free(aemp);
	} while (ep != NULL);
	return (1);
}

/*
 * Read the event with the matching event number from the cache.
 */
static struct au_event_ent *
read_from_cache(au_event_t event)
{
	struct audit_event_map *elem;

	/* XXX: Would assert 'mutex'. */

	LIST_FOREACH(elem, &ev_cache, ev_list) {
		if (elem->ev.ae_number == event)
			return (&elem->ev);
	}

	return (NULL);
}

/*
 * Check whether the audit event is preselected against the preselection
 * mask.  Returns 1 if the event is preselected, 0 if it is not, and -1 on
 * error.
 */
int
au_preselect(au_event_t event, au_mask_t *mask_p, int sorf, int flag)
{
	struct au_event_ent *ev;
	au_class_t effmask = 0;
	if (mask_p == NULL)
		return (-1);

#ifdef HAVE_PTHREAD_MUTEX_LOCK
	pthread_mutex_lock(&mutex);
#endif
	if (firsttime) {
		firsttime = 0;
		if (load_event_table() == -1) {
#ifdef HAVE_PTHREAD_MUTEX_LOCK
			pthread_mutex_unlock(&mutex);
#endif
			return (-1);
		}
	}
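	/*
	 * AU_PRS_REREAD rebuilds the cache from the audit_event(5) database
	 * before the lookup; AU_PRS_USECACHE looks the event up in the
	 * cache as already loaded.  Any other flag is an error.
	 */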
	switch (flag) {
	case AU_PRS_REREAD:
		flush_cache();
		if (load_event_table() == -1) {
#ifdef HAVE_PTHREAD_MUTEX_LOCK
			pthread_mutex_unlock(&mutex);
#endif
			return (-1);
		}
		ev = read_from_cache(event);
		break;
	case AU_PRS_USECACHE:
		ev = read_from_cache(event);
		break;
	default:
		ev = NULL;
	}
	if (ev == NULL) {
#ifdef HAVE_PTHREAD_MUTEX_LOCK
		pthread_mutex_unlock(&mutex);
#endif
		return (-1);
	}
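	/*
	 * Compute the effective class mask: the event's class bits that are
	 * enabled in the success and/or failure preselection masks, as
	 * selected by 'sorf'.
	 */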
	if (sorf & AU_PRS_SUCCESS)
		effmask |= (mask_p->am_success & ev->ae_class);
	if (sorf & AU_PRS_FAILURE)
		effmask |= (mask_p->am_failure & ev->ae_class);
#ifdef HAVE_PTHREAD_MUTEX_LOCK
	pthread_mutex_unlock(&mutex);
#endif
	if (effmask != 0)
		return (1);
	return (0);
}
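
/*
 * Example usage (a sketch, not part of libbsm): a caller deciding whether
 * to commit an audit record for a subject might wrap au_preselect() as
 * below.  The helper name 'should_audit' is hypothetical; AU_PRS_BOTH
 * selects on both success and failure, and AU_PRS_USECACHE avoids
 * re-reading the audit_event(5) database on every call.
 */
#if 0
static int
should_audit(au_event_t event, au_mask_t *pmask)
{

	return (au_preselect(event, pmask, AU_PRS_BOTH, AU_PRS_USECACHE) == 1);
}
#endif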