/* pfil.c — FreeBSD revision 254770 */
/* $FreeBSD: head/sys/net/pfil.c 254770 2013-08-24 10:30:20Z andre $ */
/* $NetBSD: pfil.c,v 1.20 2001/11/12 23:49:46 lukem Exp $ */

/*-
 * Copyright (c) 1996 Matthew R. Green
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. The name of the author may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
30 */ 31 32#include <sys/param.h> 33#include <sys/kernel.h> 34#include <sys/errno.h> 35#include <sys/lock.h> 36#include <sys/malloc.h> 37#include <sys/rmlock.h> 38#include <sys/socket.h> 39#include <sys/socketvar.h> 40#include <sys/systm.h> 41#include <sys/condvar.h> 42#include <sys/lock.h> 43#include <sys/mutex.h> 44#include <sys/proc.h> 45#include <sys/queue.h> 46 47#include <net/if.h> 48#include <net/pfil.h> 49 50static struct mtx pfil_global_lock; 51 52MTX_SYSINIT(pfil_heads_lock, &pfil_global_lock, "pfil_head_list lock", 53 MTX_DEF); 54 55static int pfil_list_add(pfil_list_t *, struct packet_filter_hook *, int); 56static int pfil_list_remove(pfil_list_t *, pfil_func_t, void *); 57 58LIST_HEAD(pfilheadhead, pfil_head); 59VNET_DEFINE(struct pfilheadhead, pfil_head_list); 60#define V_pfil_head_list VNET(pfil_head_list) 61VNET_DEFINE(struct rmlock, pfil_lock); 62#define V_pfil_lock VNET(pfil_lock) 63 64/* 65 * pfil_run_hooks() runs the specified packet filter hooks. 66 */ 67int 68pfil_run_hooks(struct pfil_head *ph, struct mbuf **mp, struct ifnet *ifp, 69 int dir, struct inpcb *inp) 70{ 71 struct rm_priotracker rmpt; 72 struct packet_filter_hook *pfh; 73 struct mbuf *m = *mp; 74 int rv = 0; 75 76 PFIL_RLOCK(ph, &rmpt); 77 KASSERT(ph->ph_nhooks >= 0, ("Pfil hook count dropped < 0")); 78 for (pfh = pfil_hook_get(dir, ph); pfh != NULL; 79 pfh = TAILQ_NEXT(pfh, pfil_link)) { 80 if (pfh->pfil_func != NULL) { 81 rv = (*pfh->pfil_func)(pfh->pfil_arg, &m, ifp, dir, 82 inp); 83 if (rv != 0 || m == NULL) 84 break; 85 } 86 } 87 PFIL_RUNLOCK(ph, &rmpt); 88 *mp = m; 89 return (rv); 90} 91 92/* 93 * pfil_try_rlock() acquires rm reader lock for specified head 94 * if this is immediately possible. 95 */ 96int 97pfil_try_rlock(struct pfil_head *ph, struct rm_priotracker *tracker) 98{ 99 100 return (PFIL_TRY_RLOCK(ph, tracker)); 101} 102 103/* 104 * pfil_rlock() acquires rm reader lock for specified head. 
105 */ 106void 107pfil_rlock(struct pfil_head *ph, struct rm_priotracker *tracker) 108{ 109 110 PFIL_RLOCK(ph, tracker); 111} 112 113/* 114 * pfil_runlock() releases reader lock for specified head. 115 */ 116void 117pfil_runlock(struct pfil_head *ph, struct rm_priotracker *tracker) 118{ 119 120 PFIL_RUNLOCK(ph, tracker); 121} 122 123/* 124 * pfil_wlock() acquires writer lock for specified head. 125 */ 126void 127pfil_wlock(struct pfil_head *ph) 128{ 129 130 PFIL_WLOCK(ph); 131} 132 133/* 134 * pfil_wunlock() releases writer lock for specified head. 135 */ 136void 137pfil_wunlock(struct pfil_head *ph) 138{ 139 140 PFIL_WUNLOCK(ph); 141} 142 143/* 144 * pfil_wowned() returns a non-zero value if the current thread owns 145 * an exclusive lock. 146 */ 147int 148pfil_wowned(struct pfil_head *ph) 149{ 150 151 return (PFIL_WOWNED(ph)); 152} 153/* 154 * pfil_head_register() registers a pfil_head with the packet filter hook 155 * mechanism. 156 */ 157int 158pfil_head_register(struct pfil_head *ph) 159{ 160 struct pfil_head *lph; 161 162 PFIL_LIST_LOCK(); 163 LIST_FOREACH(lph, &V_pfil_head_list, ph_list) { 164 if (ph->ph_type == lph->ph_type && 165 ph->ph_un.phu_val == lph->ph_un.phu_val) { 166 PFIL_LIST_UNLOCK(); 167 return (EEXIST); 168 } 169 } 170 PFIL_LOCK_INIT(ph); 171 ph->ph_nhooks = 0; 172 TAILQ_INIT(&ph->ph_in); 173 TAILQ_INIT(&ph->ph_out); 174 LIST_INSERT_HEAD(&V_pfil_head_list, ph, ph_list); 175 PFIL_LIST_UNLOCK(); 176 return (0); 177} 178 179/* 180 * pfil_head_unregister() removes a pfil_head from the packet filter hook 181 * mechanism. The producer of the hook promises that all outstanding 182 * invocations of the hook have completed before it unregisters the hook. 
183 */ 184int 185pfil_head_unregister(struct pfil_head *ph) 186{ 187 struct packet_filter_hook *pfh, *pfnext; 188 189 PFIL_LIST_LOCK(); 190 LIST_REMOVE(ph, ph_list); 191 PFIL_LIST_UNLOCK(); 192 TAILQ_FOREACH_SAFE(pfh, &ph->ph_in, pfil_link, pfnext) 193 free(pfh, M_IFADDR); 194 TAILQ_FOREACH_SAFE(pfh, &ph->ph_out, pfil_link, pfnext) 195 free(pfh, M_IFADDR); 196 PFIL_LOCK_DESTROY(ph); 197 return (0); 198} 199 200/* 201 * pfil_head_get() returns the pfil_head for a given key/dlt. 202 */ 203struct pfil_head * 204pfil_head_get(int type, u_long val) 205{ 206 struct pfil_head *ph; 207 208 PFIL_LIST_LOCK(); 209 LIST_FOREACH(ph, &V_pfil_head_list, ph_list) 210 if (ph->ph_type == type && ph->ph_un.phu_val == val) 211 break; 212 PFIL_LIST_UNLOCK(); 213 return (ph); 214} 215 216/* 217 * pfil_add_hook() adds a function to the packet filter hook. the 218 * flags are: 219 * PFIL_IN call me on incoming packets 220 * PFIL_OUT call me on outgoing packets 221 * PFIL_ALL call me on all of the above 222 * PFIL_WAITOK OK to call malloc with M_WAITOK. 223 */ 224int 225pfil_add_hook(pfil_func_t func, void *arg, int flags, struct pfil_head *ph) 226{ 227 struct packet_filter_hook *pfh1 = NULL; 228 struct packet_filter_hook *pfh2 = NULL; 229 int err; 230 231 if (flags & PFIL_IN) { 232 pfh1 = (struct packet_filter_hook *)malloc(sizeof(*pfh1), 233 M_IFADDR, (flags & PFIL_WAITOK) ? M_WAITOK : M_NOWAIT); 234 if (pfh1 == NULL) { 235 err = ENOMEM; 236 goto error; 237 } 238 } 239 if (flags & PFIL_OUT) { 240 pfh2 = (struct packet_filter_hook *)malloc(sizeof(*pfh1), 241 M_IFADDR, (flags & PFIL_WAITOK) ? 
M_WAITOK : M_NOWAIT); 242 if (pfh2 == NULL) { 243 err = ENOMEM; 244 goto error; 245 } 246 } 247 PFIL_WLOCK(ph); 248 if (flags & PFIL_IN) { 249 pfh1->pfil_func = func; 250 pfh1->pfil_arg = arg; 251 err = pfil_list_add(&ph->ph_in, pfh1, flags & ~PFIL_OUT); 252 if (err) 253 goto locked_error; 254 ph->ph_nhooks++; 255 } 256 if (flags & PFIL_OUT) { 257 pfh2->pfil_func = func; 258 pfh2->pfil_arg = arg; 259 err = pfil_list_add(&ph->ph_out, pfh2, flags & ~PFIL_IN); 260 if (err) { 261 if (flags & PFIL_IN) 262 pfil_list_remove(&ph->ph_in, func, arg); 263 goto locked_error; 264 } 265 ph->ph_nhooks++; 266 } 267 PFIL_WUNLOCK(ph); 268 return (0); 269locked_error: 270 PFIL_WUNLOCK(ph); 271error: 272 if (pfh1 != NULL) 273 free(pfh1, M_IFADDR); 274 if (pfh2 != NULL) 275 free(pfh2, M_IFADDR); 276 return (err); 277} 278 279/* 280 * pfil_remove_hook removes a specific function from the packet filter hook 281 * list. 282 */ 283int 284pfil_remove_hook(pfil_func_t func, void *arg, int flags, struct pfil_head *ph) 285{ 286 int err = 0; 287 288 PFIL_WLOCK(ph); 289 if (flags & PFIL_IN) { 290 err = pfil_list_remove(&ph->ph_in, func, arg); 291 if (err == 0) 292 ph->ph_nhooks--; 293 } 294 if ((err == 0) && (flags & PFIL_OUT)) { 295 err = pfil_list_remove(&ph->ph_out, func, arg); 296 if (err == 0) 297 ph->ph_nhooks--; 298 } 299 PFIL_WUNLOCK(ph); 300 return (err); 301} 302 303static int 304pfil_list_add(pfil_list_t *list, struct packet_filter_hook *pfh1, int flags) 305{ 306 struct packet_filter_hook *pfh; 307 308 /* 309 * First make sure the hook is not already there. 310 */ 311 TAILQ_FOREACH(pfh, list, pfil_link) 312 if (pfh->pfil_func == pfh1->pfil_func && 313 pfh->pfil_arg == pfh1->pfil_arg) 314 return (EEXIST); 315 316 /* 317 * Insert the input list in reverse order of the output list so that 318 * the same path is followed in or out of the kernel. 
319 */ 320 if (flags & PFIL_IN) 321 TAILQ_INSERT_HEAD(list, pfh1, pfil_link); 322 else 323 TAILQ_INSERT_TAIL(list, pfh1, pfil_link); 324 return (0); 325} 326 327/* 328 * pfil_list_remove is an internal function that takes a function off the 329 * specified list. 330 */ 331static int 332pfil_list_remove(pfil_list_t *list, pfil_func_t func, void *arg) 333{ 334 struct packet_filter_hook *pfh; 335 336 TAILQ_FOREACH(pfh, list, pfil_link) 337 if (pfh->pfil_func == func && pfh->pfil_arg == arg) { 338 TAILQ_REMOVE(list, pfh, pfil_link); 339 free(pfh, M_IFADDR); 340 return (0); 341 } 342 return (ENOENT); 343} 344 345/* 346 * Stuff that must be initialized for every instance (including the first of 347 * course). 348 */ 349static int 350vnet_pfil_init(const void *unused) 351{ 352 353 LIST_INIT(&V_pfil_head_list); 354 PFIL_LOCK_INIT_REAL(&V_pfil_lock, "shared"); 355 return (0); 356} 357 358/* 359 * Called for the removal of each instance. 360 */ 361static int 362vnet_pfil_uninit(const void *unused) 363{ 364 365 /* XXX should panic if list is not empty */ 366 PFIL_LOCK_DESTROY_REAL(&V_pfil_lock); 367 return (0); 368} 369 370/* Define startup order. */ 371#define PFIL_SYSINIT_ORDER SI_SUB_PROTO_BEGIN 372#define PFIL_MODEVENT_ORDER (SI_ORDER_FIRST) /* On boot slot in here. */ 373#define PFIL_VNET_ORDER (PFIL_MODEVENT_ORDER + 2) /* Later still. */ 374 375/* 376 * Starting up. 377 * 378 * VNET_SYSINIT is called for each existing vnet and each new vnet. 379 */ 380VNET_SYSINIT(vnet_pfil_init, PFIL_SYSINIT_ORDER, PFIL_VNET_ORDER, 381 vnet_pfil_init, NULL); 382 383/* 384 * Closing up shop. These are done in REVERSE ORDER. Not called on reboot. 385 * 386 * VNET_SYSUNINIT is called for each exiting vnet as it exits. 387 */ 388VNET_SYSUNINIT(vnet_pfil_uninit, PFIL_SYSINIT_ORDER, PFIL_VNET_ORDER, 389 vnet_pfil_uninit, NULL); 390