/*	$FreeBSD: releng/11.0/sys/net/pfil.c 302054 2016-06-21 13:48:49Z bz $ */
/*	$NetBSD: pfil.c,v 1.20 2001/11/12 23:49:46 lukem Exp $	*/

/*-
 * Copyright (c) 1996 Matthew R. Green
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. The name of the author may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/param.h>
#include <sys/kernel.h>
#include <sys/errno.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/rmlock.h>
#include <sys/socket.h>
#include <sys/socketvar.h>
#include <sys/systm.h>
#include <sys/condvar.h>
#include <sys/mutex.h>
#include <sys/proc.h>
#include <sys/queue.h>

#include <net/if.h>
#include <net/if_var.h>
#include <net/pfil.h>

static struct mtx pfil_global_lock;

MTX_SYSINIT(pfil_heads_lock, &pfil_global_lock, "pfil_head_list lock",
  MTX_DEF);

static struct packet_filter_hook *pfil_chain_get(int, struct pfil_head *);
static int pfil_chain_add(pfil_chain_t *, struct packet_filter_hook *, int);
static int pfil_chain_remove(pfil_chain_t *, pfil_func_t, void *);

LIST_HEAD(pfilheadhead, pfil_head);
VNET_DEFINE(struct pfilheadhead, pfil_head_list);
#define	V_pfil_head_list	VNET(pfil_head_list)
VNET_DEFINE(struct rmlock, pfil_lock);
#define	V_pfil_lock	VNET(pfil_lock)

/*
 * pfil_run_hooks() runs the specified packet filter hook chain.
 */
int
pfil_run_hooks(struct pfil_head *ph, struct mbuf **mp, struct ifnet *ifp,
    int dir, struct inpcb *inp)
{
	struct rm_priotracker rmpt;
	struct packet_filter_hook *pfh;
	struct mbuf *m = *mp;
	int rv = 0;

	PFIL_RLOCK(ph, &rmpt);
	KASSERT(ph->ph_nhooks >= 0, ("Pfil hook count dropped < 0"));
	for (pfh = pfil_chain_get(dir, ph); pfh != NULL;
	     pfh = TAILQ_NEXT(pfh, pfil_chain)) {
		if (pfh->pfil_func != NULL) {
			rv = (*pfh->pfil_func)(pfh->pfil_arg, &m, ifp, dir,
			    inp);
			if (rv != 0 || m == NULL)
				break;
		}
	}
	PFIL_RUNLOCK(ph, &rmpt);
	*mp = m;
	return (rv);
}
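
/*
 * Illustrative sketch (comment only, not compiled): a protocol input
 * path would typically skip the call when no hooks are attached and
 * otherwise let the chain filter, modify or consume the mbuf.  The
 * head lookup is shown inline for clarity; a real caller would
 * normally cache the head it uses.
 *
 *	struct pfil_head *ph = pfil_head_get(PFIL_TYPE_AF, AF_INET);
 *
 *	if (ph != NULL && PFIL_HOOKED(ph)) {
 *		if (pfil_run_hooks(ph, &m, m->m_pkthdr.rcvif, PFIL_IN,
 *		    NULL) != 0 || m == NULL)
 *			return;
 *	}
 */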

static struct packet_filter_hook *
pfil_chain_get(int dir, struct pfil_head *ph)
{

	if (dir == PFIL_IN)
		return (TAILQ_FIRST(&ph->ph_in));
	else if (dir == PFIL_OUT)
		return (TAILQ_FIRST(&ph->ph_out));
	else
		return (NULL);
}

/*
 * pfil_try_rlock() acquires the rm reader lock for the specified head
 * if this is immediately possible.
 */
int
pfil_try_rlock(struct pfil_head *ph, struct rm_priotracker *tracker)
{

	return (PFIL_TRY_RLOCK(ph, tracker));
}

/*
 * pfil_rlock() acquires the rm reader lock for the specified head.
 */
void
pfil_rlock(struct pfil_head *ph, struct rm_priotracker *tracker)
{

	PFIL_RLOCK(ph, tracker);
}

/*
 * pfil_runlock() releases the reader lock for the specified head.
 */
void
pfil_runlock(struct pfil_head *ph, struct rm_priotracker *tracker)
{

	PFIL_RUNLOCK(ph, tracker);
}

/*
 * pfil_wlock() acquires the writer lock for the specified head.
 */
void
pfil_wlock(struct pfil_head *ph)
{

	PFIL_WLOCK(ph);
}

/*
 * pfil_wunlock() releases the writer lock for the specified head.
 */
void
pfil_wunlock(struct pfil_head *ph)
{

	PFIL_WUNLOCK(ph);
}

/*
 * pfil_wowned() returns a non-zero value if the current thread owns
 * the exclusive (writer) lock on the specified head.
 */
int
pfil_wowned(struct pfil_head *ph)
{

	return (PFIL_WOWNED(ph));
}

/*
 * pfil_head_register() registers a pfil_head with the packet filter hook
 * mechanism.
 */
int
pfil_head_register(struct pfil_head *ph)
{
	struct pfil_head *lph;

	PFIL_HEADLIST_LOCK();
	LIST_FOREACH(lph, &V_pfil_head_list, ph_list) {
		if (ph->ph_type == lph->ph_type &&
		    ph->ph_un.phu_val == lph->ph_un.phu_val) {
			PFIL_HEADLIST_UNLOCK();
			return (EEXIST);
		}
	}
	PFIL_LOCK_INIT(ph);
	ph->ph_nhooks = 0;
	TAILQ_INIT(&ph->ph_in);
	TAILQ_INIT(&ph->ph_out);
	LIST_INSERT_HEAD(&V_pfil_head_list, ph, ph_list);
	PFIL_HEADLIST_UNLOCK();
	return (0);
}
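
/*
 * Illustrative sketch (comment only, not compiled): a subsystem
 * provides its own head, fills in the type and key that later
 * pfil_head_get() lookups will use, and registers it once at attach
 * time.  "example_pfil_head" is a made-up name.
 *
 *	static struct pfil_head example_pfil_head;
 *
 *	example_pfil_head.ph_type = PFIL_TYPE_AF;
 *	example_pfil_head.ph_un.phu_val = AF_INET6;
 *	if (pfil_head_register(&example_pfil_head) != 0)
 *		printf("pfil head already registered\n");
 */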

/*
 * pfil_head_unregister() removes a pfil_head from the packet filter hook
 * mechanism.  The producer of the hook promises that all outstanding
 * invocations of the hook have completed before it unregisters the hook.
 */
int
pfil_head_unregister(struct pfil_head *ph)
{
	struct packet_filter_hook *pfh, *pfnext;

	PFIL_HEADLIST_LOCK();
	LIST_REMOVE(ph, ph_list);
	PFIL_HEADLIST_UNLOCK();
	TAILQ_FOREACH_SAFE(pfh, &ph->ph_in, pfil_chain, pfnext)
		free(pfh, M_IFADDR);
	TAILQ_FOREACH_SAFE(pfh, &ph->ph_out, pfil_chain, pfnext)
		free(pfh, M_IFADDR);
	PFIL_LOCK_DESTROY(ph);
	return (0);
}

/*
 * pfil_head_get() returns the pfil_head for a given key/dlt.
 */
struct pfil_head *
pfil_head_get(int type, u_long val)
{
	struct pfil_head *ph;

	PFIL_HEADLIST_LOCK();
	LIST_FOREACH(ph, &V_pfil_head_list, ph_list)
		if (ph->ph_type == type && ph->ph_un.phu_val == val)
			break;
	PFIL_HEADLIST_UNLOCK();
	return (ph);
}
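
/*
 * Illustrative sketch (comment only, not compiled): looking up the
 * head registered for IPv4 traffic; NULL means no matching head has
 * been registered.
 *
 *	struct pfil_head *ph;
 *
 *	ph = pfil_head_get(PFIL_TYPE_AF, AF_INET);
 *	if (ph == NULL)
 *		return (ENOENT);
 */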

/*
 * pfil_add_hook() adds a function to the packet filter hook chain.  The
 * flags are:
 *	PFIL_IN		call me on incoming packets
 *	PFIL_OUT	call me on outgoing packets
 *	PFIL_ALL	call me on all of the above
 *	PFIL_WAITOK	OK to call malloc with M_WAITOK.
 */
int
pfil_add_hook(pfil_func_t func, void *arg, int flags, struct pfil_head *ph)
{
	struct packet_filter_hook *pfh1 = NULL;
	struct packet_filter_hook *pfh2 = NULL;
	int err;

	if (flags & PFIL_IN) {
		pfh1 = (struct packet_filter_hook *)malloc(sizeof(*pfh1),
		    M_IFADDR, (flags & PFIL_WAITOK) ? M_WAITOK : M_NOWAIT);
		if (pfh1 == NULL) {
			err = ENOMEM;
			goto error;
		}
	}
	if (flags & PFIL_OUT) {
		pfh2 = (struct packet_filter_hook *)malloc(sizeof(*pfh2),
		    M_IFADDR, (flags & PFIL_WAITOK) ? M_WAITOK : M_NOWAIT);
		if (pfh2 == NULL) {
			err = ENOMEM;
			goto error;
		}
	}
	PFIL_WLOCK(ph);
	if (flags & PFIL_IN) {
		pfh1->pfil_func = func;
		pfh1->pfil_arg = arg;
		err = pfil_chain_add(&ph->ph_in, pfh1, flags & ~PFIL_OUT);
		if (err)
			goto locked_error;
		ph->ph_nhooks++;
	}
	if (flags & PFIL_OUT) {
		pfh2->pfil_func = func;
		pfh2->pfil_arg = arg;
		err = pfil_chain_add(&ph->ph_out, pfh2, flags & ~PFIL_IN);
		if (err) {
			if (flags & PFIL_IN)
				pfil_chain_remove(&ph->ph_in, func, arg);
			goto locked_error;
		}
		ph->ph_nhooks++;
	}
	PFIL_WUNLOCK(ph);
	return (0);
locked_error:
	PFIL_WUNLOCK(ph);
error:
	if (pfh1 != NULL)
		free(pfh1, M_IFADDR);
	if (pfh2 != NULL)
		free(pfh2, M_IFADDR);
	return (err);
}
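
/*
 * Illustrative sketch (comment only, not compiled): a filter module
 * implements a pfil_func_t and attaches it for both directions.
 * "example_filter" and its NULL argument are made up; returning a
 * non-zero value, or freeing the mbuf and setting *mp to NULL, stops
 * the chain for that packet.
 *
 *	static int
 *	example_filter(void *arg, struct mbuf **mp, struct ifnet *ifp,
 *	    int dir, struct inpcb *inp)
 *	{
 *
 *		return (0);
 *	}
 *
 *	error = pfil_add_hook(example_filter, NULL,
 *	    PFIL_IN | PFIL_OUT | PFIL_WAITOK, ph);
 */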

/*
 * pfil_remove_hook() removes a specific function from the packet filter hook
 * chain.
 */
int
pfil_remove_hook(pfil_func_t func, void *arg, int flags, struct pfil_head *ph)
{
	int err = 0;

	PFIL_WLOCK(ph);
	if (flags & PFIL_IN) {
		err = pfil_chain_remove(&ph->ph_in, func, arg);
		if (err == 0)
			ph->ph_nhooks--;
	}
	if ((err == 0) && (flags & PFIL_OUT)) {
		err = pfil_chain_remove(&ph->ph_out, func, arg);
		if (err == 0)
			ph->ph_nhooks--;
	}
	PFIL_WUNLOCK(ph);
	return (err);
}
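
/*
 * Illustrative sketch (comment only, not compiled): the matching
 * teardown, e.g. at module unload, passes the same function/argument
 * pair and direction flags that were used with pfil_add_hook().
 *
 *	(void)pfil_remove_hook(example_filter, NULL,
 *	    PFIL_IN | PFIL_OUT, ph);
 */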

/*
 * Internal: Add a new pfil hook into a hook chain.
 */
static int
pfil_chain_add(pfil_chain_t *chain, struct packet_filter_hook *pfh1, int flags)
{
	struct packet_filter_hook *pfh;

	/*
	 * First make sure the hook is not already there.
	 */
	TAILQ_FOREACH(pfh, chain, pfil_chain)
		if (pfh->pfil_func == pfh1->pfil_func &&
		    pfh->pfil_arg == pfh1->pfil_arg)
			return (EEXIST);

	/*
	 * Insert the input list in reverse order of the output list so that
	 * the same path is followed in or out of the kernel.
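	 *
	 * For example, if hooks A and B are added in that order, inbound
	 * packets traverse B then A while outbound packets traverse A then
	 * B, so the ordering is consistent relative to the wire.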
	 */
	if (flags & PFIL_IN)
		TAILQ_INSERT_HEAD(chain, pfh1, pfil_chain);
	else
		TAILQ_INSERT_TAIL(chain, pfh1, pfil_chain);
	return (0);
}

/*
 * Internal: Remove a pfil hook from a hook chain.
 */
static int
pfil_chain_remove(pfil_chain_t *chain, pfil_func_t func, void *arg)
{
	struct packet_filter_hook *pfh;

	TAILQ_FOREACH(pfh, chain, pfil_chain)
		if (pfh->pfil_func == func && pfh->pfil_arg == arg) {
			TAILQ_REMOVE(chain, pfh, pfil_chain);
			free(pfh, M_IFADDR);
			return (0);
		}
	return (ENOENT);
}

/*
 * Stuff that must be initialized for every instance (including the first of
 * course).
 */
static void
vnet_pfil_init(const void *unused __unused)
{

	LIST_INIT(&V_pfil_head_list);
	PFIL_LOCK_INIT_REAL(&V_pfil_lock, "shared");
}

/*
 * Called for the removal of each instance.
 */
static void
vnet_pfil_uninit(const void *unused __unused)
{

	KASSERT(LIST_EMPTY(&V_pfil_head_list),
	    ("%s: pfil_head_list %p not empty", __func__, &V_pfil_head_list));
	PFIL_LOCK_DESTROY_REAL(&V_pfil_lock);
}

/*
 * Starting up.
 *
 * VNET_SYSINIT is called for each existing vnet and each new vnet.
 * Make sure the pfil bits are initialized first, before any other
 * subsystem that might piggyback on SI_SUB_PROTO_PFIL.
 */
VNET_SYSINIT(vnet_pfil_init, SI_SUB_PROTO_PFIL, SI_ORDER_FIRST,
    vnet_pfil_init, NULL);

/*
 * Closing up shop.  These are done in REVERSE ORDER.  Not called on reboot.
 *
 * VNET_SYSUNINIT is called for each exiting vnet as it exits.
 */
VNET_SYSUNINIT(vnet_pfil_uninit, SI_SUB_PROTO_PFIL, SI_ORDER_FIRST,
    vnet_pfil_uninit, NULL);
403