Deleted Added
full compact
if_tun.c (130585) if_tun.c (130640)
1/* $NetBSD: if_tun.c,v 1.14 1994/06/29 06:36:25 cgd Exp $ */
2
3/*
4 * Copyright (c) 1988, Julian Onions <jpo@cs.nott.ac.uk>
5 * Nottingham University 1987.
6 *
7 * This source may be freely distributed, however I would be interested
8 * in any changes that are made.
9 *
10 * This driver takes packets off the IP i/f and hands them up to a
11 * user process to have its wicked way with. This driver has it's
12 * roots in a similar driver written by Phil Cockcroft (formerly) at
13 * UCL. This driver is based much more on read/write/poll mode of
14 * operation though.
15 *
1/* $NetBSD: if_tun.c,v 1.14 1994/06/29 06:36:25 cgd Exp $ */
2
3/*
4 * Copyright (c) 1988, Julian Onions <jpo@cs.nott.ac.uk>
5 * Nottingham University 1987.
6 *
7 * This source may be freely distributed, however I would be interested
8 * in any changes that are made.
9 *
10 * This driver takes packets off the IP i/f and hands them up to a
11 * user process to have its wicked way with. This driver has it's
12 * roots in a similar driver written by Phil Cockcroft (formerly) at
13 * UCL. This driver is based much more on read/write/poll mode of
14 * operation though.
15 *
16 * $FreeBSD: head/sys/net/if_tun.c 130585 2004-06-16 09:47:26Z phk $
16 * $FreeBSD: head/sys/net/if_tun.c 130640 2004-06-17 17:16:53Z phk $
17 */
18
19#include "opt_atalk.h"
20#include "opt_inet.h"
21#include "opt_inet6.h"
22#include "opt_ipx.h"
23#include "opt_mac.h"
24
25#include <sys/param.h>
26#include <sys/proc.h>
27#include <sys/systm.h>
28#include <sys/mac.h>
29#include <sys/mbuf.h>
30#include <sys/module.h>
31#include <sys/socket.h>
32#include <sys/filio.h>
33#include <sys/sockio.h>
34#include <sys/ttycom.h>
35#include <sys/poll.h>
36#include <sys/signalvar.h>
37#include <sys/filedesc.h>
38#include <sys/kernel.h>
39#include <sys/sysctl.h>
40#include <sys/conf.h>
41#include <sys/uio.h>
42#include <sys/vnode.h>
43#include <sys/malloc.h>
44#include <sys/random.h>
45
46#include <net/if.h>
47#include <net/if_types.h>
48#include <net/netisr.h>
49#include <net/route.h>
50#ifdef INET
51#include <netinet/in.h>
52#endif
53#include <net/bpf.h>
54#include <net/if_tun.h>
55
56#include <sys/queue.h>
57
/*
 * tun_list is protected by global tunmtx.  Other mutable fields are
 * protected by tun->tun_mtx, or by their owning subsystem.  tun_dev is
 * static for the duration of a tunnel interface.
 */
struct tun_softc {
	TAILQ_ENTRY(tun_softc)	tun_list;	/* linkage on global tunhead */
	struct cdev *tun_dev;			/* backing character device */
	u_short	tun_flags;		/* misc flags */
#define	TUN_OPEN	0x0001		/* device is open */
#define	TUN_INITED	0x0002		/* softc set up by tuncreate() */
#define	TUN_RCOLL	0x0004		/* NOTE(review): not set in this file */
#define	TUN_IASET	0x0008		/* local IPv4 address configured */
#define	TUN_DSTADDR	0x0010		/* destination IPv4 address configured */
#define	TUN_LMODE	0x0020		/* prepend sockaddr to packets */
#define	TUN_RWAIT	0x0040		/* a reader sleeps in tunread() */
#define	TUN_ASYNC	0x0080		/* SIGIO delivery enabled (FIOASYNC) */
#define	TUN_IFHEAD	0x0100		/* prepend address-family header */

#define	TUN_READY	(TUN_OPEN | TUN_INITED)

	/*
	 * XXXRW: tun_pid is used to exclusively lock /dev/tun.  Is this
	 * actually needed?  Can we just return EBUSY if already open?
	 * Problem is that this involved inherent races when a tun device
	 * is handed off from one process to another, as opposed to just
	 * being slightly stale informationally.
	 */
	pid_t	tun_pid;		/* owning pid */
	struct	ifnet tun_if;		/* the interface */
	struct	sigio *tun_sigio;	/* information for async I/O */
	struct	selinfo	tun_rsel;	/* read select */
	struct	mtx	tun_mtx;	/* protect mutable softc fields */
};
92
93#define TUNDEBUG if (tundebug) if_printf
94#define TUNNAME "tun"
95
96/*
97 * All mutable global variables in if_tun are locked using tunmtx, with
98 * the exception of tundebug, which is used unlocked, and tunclones,
99 * which is static after setup.
100 */
101static struct mtx tunmtx;
102static MALLOC_DEFINE(M_TUN, TUNNAME, "Tunnel Interface");
103static int tundebug = 0;
104static struct clonedevs *tunclones;
105static TAILQ_HEAD(,tun_softc) tunhead = TAILQ_HEAD_INITIALIZER(tunhead);
106SYSCTL_INT(_debug, OID_AUTO, if_tun_debug, CTLFLAG_RW, &tundebug, 0, "");
107
108static void tunclone(void *arg, char *name, int namelen, struct cdev **dev);
109static void tuncreate(struct cdev *dev);
110static int tunifioctl(struct ifnet *, u_long, caddr_t);
111static int tuninit(struct ifnet *);
112static int tunmodevent(module_t, int, void *);
113static int tunoutput(struct ifnet *, struct mbuf *, struct sockaddr *,
114 struct rtentry *rt);
115static void tunstart(struct ifnet *);
116
117static d_open_t tunopen;
118static d_close_t tunclose;
119static d_read_t tunread;
120static d_write_t tunwrite;
121static d_ioctl_t tunioctl;
122static d_poll_t tunpoll;
123
/* Character-device entry points for /dev/tunN (Giant-locked pseudo device). */
static struct cdevsw tun_cdevsw = {
	.d_version =	D_VERSION,
	.d_flags =	D_PSEUDO | D_NEEDGIANT,
	.d_open =	tunopen,
	.d_close =	tunclose,
	.d_read =	tunread,
	.d_write =	tunwrite,
	.d_ioctl =	tunioctl,
	.d_poll =	tunpoll,
	.d_name =	TUNNAME,
};
135
136static void
137tunclone(void *arg, char *name, int namelen, struct cdev **dev)
138{
139 int u, i;
140
17 */
18
19#include "opt_atalk.h"
20#include "opt_inet.h"
21#include "opt_inet6.h"
22#include "opt_ipx.h"
23#include "opt_mac.h"
24
25#include <sys/param.h>
26#include <sys/proc.h>
27#include <sys/systm.h>
28#include <sys/mac.h>
29#include <sys/mbuf.h>
30#include <sys/module.h>
31#include <sys/socket.h>
32#include <sys/filio.h>
33#include <sys/sockio.h>
34#include <sys/ttycom.h>
35#include <sys/poll.h>
36#include <sys/signalvar.h>
37#include <sys/filedesc.h>
38#include <sys/kernel.h>
39#include <sys/sysctl.h>
40#include <sys/conf.h>
41#include <sys/uio.h>
42#include <sys/vnode.h>
43#include <sys/malloc.h>
44#include <sys/random.h>
45
46#include <net/if.h>
47#include <net/if_types.h>
48#include <net/netisr.h>
49#include <net/route.h>
50#ifdef INET
51#include <netinet/in.h>
52#endif
53#include <net/bpf.h>
54#include <net/if_tun.h>
55
56#include <sys/queue.h>
57
/*
 * tun_list is protected by global tunmtx.  Other mutable fields are
 * protected by tun->tun_mtx, or by their owning subsystem.  tun_dev is
 * static for the duration of a tunnel interface.
 */
struct tun_softc {
	TAILQ_ENTRY(tun_softc)	tun_list;	/* linkage on global tunhead */
	struct cdev *tun_dev;			/* backing character device */
	u_short	tun_flags;		/* misc flags */
#define	TUN_OPEN	0x0001		/* device is open */
#define	TUN_INITED	0x0002		/* softc set up by tuncreate() */
#define	TUN_RCOLL	0x0004		/* NOTE(review): not set in this file */
#define	TUN_IASET	0x0008		/* local IPv4 address configured */
#define	TUN_DSTADDR	0x0010		/* destination IPv4 address configured */
#define	TUN_LMODE	0x0020		/* prepend sockaddr to packets */
#define	TUN_RWAIT	0x0040		/* a reader sleeps in tunread() */
#define	TUN_ASYNC	0x0080		/* SIGIO delivery enabled (FIOASYNC) */
#define	TUN_IFHEAD	0x0100		/* prepend address-family header */

#define	TUN_READY	(TUN_OPEN | TUN_INITED)

	/*
	 * XXXRW: tun_pid is used to exclusively lock /dev/tun.  Is this
	 * actually needed?  Can we just return EBUSY if already open?
	 * Problem is that this involved inherent races when a tun device
	 * is handed off from one process to another, as opposed to just
	 * being slightly stale informationally.
	 */
	pid_t	tun_pid;		/* owning pid */
	struct	ifnet tun_if;		/* the interface */
	struct	sigio *tun_sigio;	/* information for async I/O */
	struct	selinfo	tun_rsel;	/* read select */
	struct	mtx	tun_mtx;	/* protect mutable softc fields */
};
92
93#define TUNDEBUG if (tundebug) if_printf
94#define TUNNAME "tun"
95
96/*
97 * All mutable global variables in if_tun are locked using tunmtx, with
98 * the exception of tundebug, which is used unlocked, and tunclones,
99 * which is static after setup.
100 */
101static struct mtx tunmtx;
102static MALLOC_DEFINE(M_TUN, TUNNAME, "Tunnel Interface");
103static int tundebug = 0;
104static struct clonedevs *tunclones;
105static TAILQ_HEAD(,tun_softc) tunhead = TAILQ_HEAD_INITIALIZER(tunhead);
106SYSCTL_INT(_debug, OID_AUTO, if_tun_debug, CTLFLAG_RW, &tundebug, 0, "");
107
108static void tunclone(void *arg, char *name, int namelen, struct cdev **dev);
109static void tuncreate(struct cdev *dev);
110static int tunifioctl(struct ifnet *, u_long, caddr_t);
111static int tuninit(struct ifnet *);
112static int tunmodevent(module_t, int, void *);
113static int tunoutput(struct ifnet *, struct mbuf *, struct sockaddr *,
114 struct rtentry *rt);
115static void tunstart(struct ifnet *);
116
117static d_open_t tunopen;
118static d_close_t tunclose;
119static d_read_t tunread;
120static d_write_t tunwrite;
121static d_ioctl_t tunioctl;
122static d_poll_t tunpoll;
123
/* Character-device entry points for /dev/tunN (Giant-locked pseudo device). */
static struct cdevsw tun_cdevsw = {
	.d_version =	D_VERSION,
	.d_flags =	D_PSEUDO | D_NEEDGIANT,
	.d_open =	tunopen,
	.d_close =	tunclose,
	.d_read =	tunread,
	.d_write =	tunwrite,
	.d_ioctl =	tunioctl,
	.d_poll =	tunpoll,
	.d_name =	TUNNAME,
};
135
136static void
137tunclone(void *arg, char *name, int namelen, struct cdev **dev)
138{
139 int u, i;
140
141 if (*dev != NODEV)
141 if (*dev != NULL)
142 return;
143
144 if (strcmp(name, TUNNAME) == 0) {
145 u = -1;
146 } else if (dev_stdclone(name, NULL, TUNNAME, &u) != 1)
147 return; /* Don't recognise the name */
148 if (u != -1 && u > IF_MAXUNIT)
149 return; /* Unit number too high */
150
151 /* find any existing device, or allocate new unit number */
152 i = clone_create(&tunclones, &tun_cdevsw, &u, dev, 0);
153 if (i) {
154 /* No preexisting struct cdev *, create one */
155 *dev = make_dev(&tun_cdevsw, unit2minor(u),
156 UID_UUCP, GID_DIALER, 0600, "tun%d", u);
157 if (*dev != NULL)
158 (*dev)->si_flags |= SI_CHEAPCLONE;
159 }
160}
161
/*
 * Tear down one tunnel: detach from bpf and the network stack, destroy
 * the cdev, and free the softc.  Caller must already have unlinked tp
 * from tunhead (see tunmodevent's MOD_UNLOAD path).
 */
static void
tun_destroy(struct tun_softc *tp)
{
	struct cdev *dev;

	/* Unlocked read. */
	KASSERT((tp->tun_flags & TUN_OPEN) == 0,
	    ("tununits is out of sync - unit %d", tp->tun_if.if_dunit));

	dev = tp->tun_dev;
	bpfdetach(&tp->tun_if);
	if_detach(&tp->tun_if);
	destroy_dev(dev);
	mtx_destroy(&tp->tun_mtx);
	free(tp, M_TUN);
}
178
/*
 * Module event handler: set up the clone machinery on load, and tear
 * down every remaining tunnel on unload.
 */
static int
tunmodevent(module_t mod, int type, void *data)
{
	static eventhandler_tag tag;
	struct tun_softc *tp;

	switch (type) {
	case MOD_LOAD:
		mtx_init(&tunmtx, "tunmtx", NULL, MTX_DEF);
		clone_setup(&tunclones);
		tag = EVENTHANDLER_REGISTER(dev_clone, tunclone, 0, 1000);
		if (tag == NULL)
			return (ENOMEM);
		break;
	case MOD_UNLOAD:
		EVENTHANDLER_DEREGISTER(dev_clone, tag);

		mtx_lock(&tunmtx);
		while ((tp = TAILQ_FIRST(&tunhead)) != NULL) {
			TAILQ_REMOVE(&tunhead, tp, tun_list);
			/* tun_destroy() is called without tunmtx held. */
			mtx_unlock(&tunmtx);
			tun_destroy(tp);
			mtx_lock(&tunmtx);
		}
		mtx_unlock(&tunmtx);
		clone_cleanup(&tunclones);
		mtx_destroy(&tunmtx);
		break;
	}
	return 0;
}
210
/* Module glue: register if_tun in the pseudo-device initialisation pass. */
static moduledata_t tun_mod = {
	"if_tun",
	tunmodevent,
	0
};

DECLARE_MODULE(if_tun, tun_mod, SI_SUB_PSEUDO, SI_ORDER_ANY);
218
/*
 * if_start handler: a packet was queued on if_snd.  Wake a sleeping
 * reader, deliver SIGIO when async mode is enabled, and notify
 * select/poll waiters.
 */
static void
tunstart(struct ifnet *ifp)
{
	struct tun_softc *tp = ifp->if_softc;

	mtx_lock(&tp->tun_mtx);
	if (tp->tun_flags & TUN_RWAIT) {
		tp->tun_flags &= ~TUN_RWAIT;
		wakeup(tp);
	}
	if (tp->tun_flags & TUN_ASYNC && tp->tun_sigio) {
		/* Drop the softc lock before signalling. */
		mtx_unlock(&tp->tun_mtx);
		pgsigio(&tp->tun_sigio, SIGIO, 0);
	} else
		mtx_unlock(&tp->tun_mtx);
	selwakeuppri(&tp->tun_rsel, PZERO + 1);
}
236
/*
 * Allocate and initialise the softc and ifnet for a freshly cloned tun
 * device, link it onto tunhead, and attach it to the stack and to bpf.
 */
static void
tuncreate(struct cdev *dev)
{
	struct tun_softc *sc;
	struct ifnet *ifp;

	/* Once created for real, the device is no longer a cheap clone. */
	dev->si_flags &= ~SI_CHEAPCLONE;

	MALLOC(sc, struct tun_softc *, sizeof(*sc), M_TUN, M_WAITOK | M_ZERO);
	mtx_init(&sc->tun_mtx, "tun_mtx", NULL, MTX_DEF);
	sc->tun_flags = TUN_INITED;
	sc->tun_dev = dev;
	mtx_lock(&tunmtx);
	TAILQ_INSERT_TAIL(&tunhead, sc, tun_list);
	mtx_unlock(&tunmtx);

	ifp = &sc->tun_if;
	if_initname(ifp, TUNNAME, dev2unit(dev));
	ifp->if_mtu = TUNMTU;
	ifp->if_ioctl = tunifioctl;
	ifp->if_output = tunoutput;
	ifp->if_start = tunstart;
	ifp->if_flags = IFF_POINTOPOINT | IFF_MULTICAST;
	ifp->if_type = IFT_PPP;
	ifp->if_snd.ifq_maxlen = ifqmaxlen;
	ifp->if_softc = sc;
	if_attach(ifp);
	bpfattach(ifp, DLT_NULL, sizeof(u_int));
	dev->si_drv1 = sc;
}
267
/*
 * Device open: create the interface on first open of this unit, then
 * record the opening process as owner.  Returns EBUSY when another
 * process already owns the tunnel.
 */
static int
tunopen(struct cdev *dev, int flag, int mode, struct thread *td)
{
	struct ifnet *ifp;
	struct tun_softc *tp;

	/*
	 * XXXRW: Non-atomic test and set of dev->si_drv1 requires
	 * synchronization.
	 */
	tp = dev->si_drv1;
	if (!tp) {
		tuncreate(dev);
		tp = dev->si_drv1;
	}

	/*
	 * XXXRW: This use of tun_pid is subject to error due to the
	 * fact that a reference to the tunnel can live beyond the
	 * death of the process that created it.  Can we replace this
	 * with a simple busy flag?
	 */
	mtx_lock(&tp->tun_mtx);
	if (tp->tun_pid != 0 && tp->tun_pid != td->td_proc->p_pid) {
		mtx_unlock(&tp->tun_mtx);
		return (EBUSY);
	}
	tp->tun_pid = td->td_proc->p_pid;

	tp->tun_flags |= TUN_OPEN;
	mtx_unlock(&tp->tun_mtx);
	ifp = &tp->tun_if;
	TUNDEBUG(ifp, "open\n");

	return (0);
}
304
/*
 * tunclose - close the device - mark i/f down & delete
 * routing info
 */
static int
tunclose(struct cdev *dev, int foo, int bar, struct thread *td)
{
	struct tun_softc *tp;
	struct ifnet *ifp;
	int s;

	tp = dev->si_drv1;
	ifp = &tp->tun_if;

	mtx_lock(&tp->tun_mtx);
	tp->tun_flags &= ~TUN_OPEN;
	tp->tun_pid = 0;	/* release the pid-based exclusive lock */
	mtx_unlock(&tp->tun_mtx);

	/*
	 * junk all pending output
	 */
	IF_DRAIN(&ifp->if_snd);

	if (ifp->if_flags & IFF_UP) {
		s = splimp();
		if_down(ifp);
		splx(s);
	}

	if (ifp->if_flags & IFF_RUNNING) {
		struct ifaddr *ifa;

		s = splimp();
		/* find internet addresses and delete routes */
		TAILQ_FOREACH(ifa, &ifp->if_addrhead, ifa_link)
			if (ifa->ifa_addr->sa_family == AF_INET)
				/* Unlocked read. */
				rtinit(ifa, (int)RTM_DELETE,
				    tp->tun_flags & TUN_DSTADDR ? RTF_HOST : 0);
		ifp->if_flags &= ~IFF_RUNNING;
		splx(s);
	}

	funsetown(&tp->tun_sigio);
	/* Wake select/poll waiters so they notice the close. */
	selwakeuppri(&tp->tun_rsel, PZERO + 1);
	TUNDEBUG (ifp, "closed\n");
	return (0);
}
354
/*
 * Mark the interface up/running and scan its address list, recording in
 * tun_flags whether local (TUN_IASET) and destination (TUN_DSTADDR)
 * IPv4 addresses are configured.
 */
static int
tuninit(struct ifnet *ifp)
{
	struct tun_softc *tp = ifp->if_softc;
	struct ifaddr *ifa;
	int error = 0;

	TUNDEBUG(ifp, "tuninit\n");

	ifp->if_flags |= IFF_UP | IFF_RUNNING;
	getmicrotime(&ifp->if_lastchange);

	for (ifa = TAILQ_FIRST(&ifp->if_addrhead); ifa;
	     ifa = TAILQ_NEXT(ifa, ifa_link)) {
		if (ifa->ifa_addr == NULL)
			error = EFAULT;
			/* XXX: Should maybe return straight off? */
		else {
#ifdef INET
			if (ifa->ifa_addr->sa_family == AF_INET) {
				struct sockaddr_in *si;

				si = (struct sockaddr_in *)ifa->ifa_addr;
				mtx_lock(&tp->tun_mtx);
				if (si->sin_addr.s_addr)
					tp->tun_flags |= TUN_IASET;

				si = (struct sockaddr_in *)ifa->ifa_dstaddr;
				if (si && si->sin_addr.s_addr)
					tp->tun_flags |= TUN_DSTADDR;
				mtx_unlock(&tp->tun_mtx);
			}
#endif
		}
	}
	return (error);
}
392
/*
 * Process an ioctl request.
 */
static int
tunifioctl(struct ifnet *ifp, u_long cmd, caddr_t data)
{
	struct ifreq *ifr = (struct ifreq *)data;
	struct tun_softc *tp = ifp->if_softc;
	struct ifstat *ifs;
	int error = 0, s;

	s = splimp();
	switch(cmd) {
	case SIOCGIFSTATUS:
		/* Append the owning pid, if any, to the status text. */
		ifs = (struct ifstat *)data;
		mtx_lock(&tp->tun_mtx);
		if (tp->tun_pid)
			sprintf(ifs->ascii + strlen(ifs->ascii),
			    "\tOpened by PID %d\n", tp->tun_pid);
		mtx_unlock(&tp->tun_mtx);
		break;
	case SIOCSIFADDR:
		error = tuninit(ifp);
		TUNDEBUG(ifp, "address set, error=%d\n", error);
		break;
	case SIOCSIFDSTADDR:
		error = tuninit(ifp);
		TUNDEBUG(ifp, "destination address set, error=%d\n", error);
		break;
	case SIOCSIFMTU:
		ifp->if_mtu = ifr->ifr_mtu;
		TUNDEBUG(ifp, "mtu set\n");
		break;
	case SIOCSIFFLAGS:
	case SIOCADDMULTI:
	case SIOCDELMULTI:
		/* Accepted but nothing to do. */
		break;
	default:
		error = EINVAL;
	}
	splx(s);
	return (error);
}
436
/*
 * tunoutput - queue packets from higher level ready to put out.
 *
 * Optionally prepends the destination sockaddr (TUN_LMODE) or a
 * network-order address family word (TUN_IFHEAD) before handing the
 * packet to if_snd, where tunread() will pick it up.
 */
static int
tunoutput(
	struct ifnet *ifp,
	struct mbuf *m0,
	struct sockaddr *dst,
	struct rtentry *rt)
{
	struct tun_softc *tp = ifp->if_softc;
	u_short cached_tun_flags;
	int error;

	TUNDEBUG (ifp, "tunoutput\n");

#ifdef MAC
	error = mac_check_ifnet_transmit(ifp, m0);
	if (error) {
		m_freem(m0);
		return (error);
	}
#endif

	/* Could be unlocked read? */
	mtx_lock(&tp->tun_mtx);
	cached_tun_flags = tp->tun_flags;
	mtx_unlock(&tp->tun_mtx);
	if ((cached_tun_flags & TUN_READY) != TUN_READY) {
		TUNDEBUG (ifp, "not ready 0%o\n", tp->tun_flags);
		m_freem (m0);
		return (EHOSTDOWN);
	}

	if ((ifp->if_flags & IFF_UP) != IFF_UP) {
		m_freem (m0);
		return (EHOSTDOWN);
	}

	/* BPF write needs to be handled specially */
	if (dst->sa_family == AF_UNSPEC) {
		/* Real family is carried as the first int of the payload. */
		dst->sa_family = *(mtod(m0, int *));
		m0->m_len -= sizeof(int);
		m0->m_pkthdr.len -= sizeof(int);
		m0->m_data += sizeof(int);
	}

	if (ifp->if_bpf) {
		uint32_t af = dst->sa_family;
		bpf_mtap2(ifp->if_bpf, &af, sizeof(af), m0);
	}

	/* prepend sockaddr? this may abort if the mbuf allocation fails */
	if (cached_tun_flags & TUN_LMODE) {
		/* allocate space for sockaddr */
		M_PREPEND(m0, dst->sa_len, M_DONTWAIT);

		/* if allocation failed drop packet (M_PREPEND freed it) */
		if (m0 == NULL) {
			ifp->if_iqdrops++;
			ifp->if_oerrors++;
			return (ENOBUFS);
		} else {
			bcopy(dst, m0->m_data, dst->sa_len);
		}
	}

	if (cached_tun_flags & TUN_IFHEAD) {
		/* Prepend the address family */
		M_PREPEND(m0, 4, M_DONTWAIT);

		/* if allocation failed drop packet (M_PREPEND freed it) */
		if (m0 == NULL) {
			ifp->if_iqdrops++;
			ifp->if_oerrors++;
			return (ENOBUFS);
		} else
			*(u_int32_t *)m0->m_data = htonl(dst->sa_family);
	} else {
#ifdef INET
		if (dst->sa_family != AF_INET)
#endif
		{
			/* Without an AF header only IPv4 can be delivered. */
			m_freem(m0);
			return (EAFNOSUPPORT);
		}
	}

	/* Enqueue on if_snd; tunstart() wakes any waiting reader. */
	IFQ_HANDOFF(ifp, m0, error);
	if (error) {
		ifp->if_collisions++;
		return (ENOBUFS);
	}
	ifp->if_opackets++;
	return (0);
}
533
/*
 * the cdevsw interface is now pretty minimal.
 */
static int
tunioctl(struct cdev *dev, u_long cmd, caddr_t data, int flag, struct thread *td)
{
	int s;
	int error;
	struct tun_softc *tp = dev->si_drv1;
	struct tuninfo *tunp;

	switch (cmd) {
	case TUNSIFINFO:
		/* Set MTU/type/baudrate; changing the MTU needs privilege. */
		tunp = (struct tuninfo *)data;
		if (tunp->mtu < IF_MINMTU)
			return (EINVAL);
		if (tp->tun_if.if_mtu != tunp->mtu
		&& (error = suser(td)) != 0)
			return (error);
		tp->tun_if.if_mtu = tunp->mtu;
		tp->tun_if.if_type = tunp->type;
		tp->tun_if.if_baudrate = tunp->baudrate;
		break;
	case TUNGIFINFO:
		tunp = (struct tuninfo *)data;
		tunp->mtu = tp->tun_if.if_mtu;
		tunp->type = tp->tun_if.if_type;
		tunp->baudrate = tp->tun_if.if_baudrate;
		break;
	case TUNSDEBUG:
		tundebug = *(int *)data;
		break;
	case TUNGDEBUG:
		*(int *)data = tundebug;
		break;
	case TUNSLMODE:
		/* Link-layer mode and AF-header mode are mutually exclusive. */
		mtx_lock(&tp->tun_mtx);
		if (*(int *)data) {
			tp->tun_flags |= TUN_LMODE;
			tp->tun_flags &= ~TUN_IFHEAD;
		} else
			tp->tun_flags &= ~TUN_LMODE;
		mtx_unlock(&tp->tun_mtx);
		break;
	case TUNSIFHEAD:
		mtx_lock(&tp->tun_mtx);
		if (*(int *)data) {
			tp->tun_flags |= TUN_IFHEAD;
			tp->tun_flags &= ~TUN_LMODE;
		} else
			tp->tun_flags &= ~TUN_IFHEAD;
		mtx_unlock(&tp->tun_mtx);
		break;
	case TUNGIFHEAD:
		/* Could be unlocked read? */
		mtx_lock(&tp->tun_mtx);
		*(int *)data = (tp->tun_flags & TUN_IFHEAD) ? 1 : 0;
		mtx_unlock(&tp->tun_mtx);
		break;
	case TUNSIFMODE:
		/* deny this if UP */
		if (tp->tun_if.if_flags & IFF_UP)
			return(EBUSY);

		switch (*(int *)data & ~IFF_MULTICAST) {
		case IFF_POINTOPOINT:
		case IFF_BROADCAST:
			tp->tun_if.if_flags &=
			    ~(IFF_BROADCAST|IFF_POINTOPOINT|IFF_MULTICAST);
			tp->tun_if.if_flags |= *(int *)data;
			break;
		default:
			return(EINVAL);
		}
		break;
	case TUNSIFPID:
		/* Take over pid-based ownership of the tunnel. */
		mtx_lock(&tp->tun_mtx);
		tp->tun_pid = curthread->td_proc->p_pid;
		mtx_unlock(&tp->tun_mtx);
		break;
	case FIONBIO:
		break;
	case FIOASYNC:
		mtx_lock(&tp->tun_mtx);
		if (*(int *)data)
			tp->tun_flags |= TUN_ASYNC;
		else
			tp->tun_flags &= ~TUN_ASYNC;
		mtx_unlock(&tp->tun_mtx);
		break;
	case FIONREAD:
		/* Byte count of the first queued packet only. */
		s = splimp();
		if (tp->tun_if.if_snd.ifq_head) {
			struct mbuf *mb = tp->tun_if.if_snd.ifq_head;
			for( *(int *)data = 0; mb != 0; mb = mb->m_next)
				*(int *)data += mb->m_len;
		} else
			*(int *)data = 0;
		splx(s);
		break;
	case FIOSETOWN:
		return (fsetown(*(int *)data, &tp->tun_sigio));

	case FIOGETOWN:
		*(int *)data = fgetown(&tp->tun_sigio);
		return (0);

	/* This is deprecated, FIOSETOWN should be used instead. */
	case TIOCSPGRP:
		return (fsetown(-(*(int *)data), &tp->tun_sigio));

	/* This is deprecated, FIOGETOWN should be used instead. */
	case TIOCGPGRP:
		*(int *)data = -fgetown(&tp->tun_sigio);
		return (0);

	default:
		return (ENOTTY);
	}
	return (0);
}
655
/*
 * The cdevsw read interface - reads a packet at a time, or at
 * least as much of a packet as can be read.
 */
static int
tunread(struct cdev *dev, struct uio *uio, int flag)
{
	struct tun_softc *tp = dev->si_drv1;
	struct ifnet *ifp = &tp->tun_if;
	struct mbuf *m;
	int error=0, len, s;

	TUNDEBUG (ifp, "read\n");
	mtx_lock(&tp->tun_mtx);
	if ((tp->tun_flags & TUN_READY) != TUN_READY) {
		mtx_unlock(&tp->tun_mtx);
		TUNDEBUG (ifp, "not ready 0%o\n", tp->tun_flags);
		return (EHOSTDOWN);
	}

	tp->tun_flags &= ~TUN_RWAIT;
	mtx_unlock(&tp->tun_mtx);

	s = splimp();
	/* Dequeue a packet, sleeping until one arrives unless non-blocking. */
	do {
		IF_DEQUEUE(&ifp->if_snd, m);
		if (m == NULL) {
			if (flag & IO_NDELAY) {
				splx(s);
				return (EWOULDBLOCK);
			}
			mtx_lock(&tp->tun_mtx);
			tp->tun_flags |= TUN_RWAIT;
			mtx_unlock(&tp->tun_mtx);
			if((error = tsleep(tp, PCATCH | (PZERO + 1),
					"tunread", 0)) != 0) {
				splx(s);
				return (error);
			}
		}
	} while (m == NULL);
	splx(s);

	/* Copy the chain out, freeing each mbuf as it is consumed. */
	while (m && uio->uio_resid > 0 && error == 0) {
		len = min(uio->uio_resid, m->m_len);
		if (len != 0)
			error = uiomove(mtod(m, void *), len, uio);
		m = m_free(m);
	}

	if (m) {
		/* Caller's buffer was too small; drop the remainder. */
		TUNDEBUG(ifp, "Dropping mbuf\n");
		m_freem(m);
	}
	return (error);
}
712
713/*
714 * the cdevsw write interface - an atomic write is a packet - or else!
715 */
716static int
717tunwrite(struct cdev *dev, struct uio *uio, int flag)
718{
719 struct tun_softc *tp = dev->si_drv1;
720 struct ifnet *ifp = &tp->tun_if;
721 struct mbuf *top, **mp, *m;
722 int error=0, tlen, mlen;
723 uint32_t family;
724 int isr;
725
726 TUNDEBUG(ifp, "tunwrite\n");
727
728 if ((ifp->if_flags & IFF_UP) != IFF_UP)
729 /* ignore silently */
730 return (0);
731
732 if (uio->uio_resid == 0)
733 return (0);
734
735 if (uio->uio_resid < 0 || uio->uio_resid > TUNMRU) {
736 TUNDEBUG(ifp, "len=%d!\n", uio->uio_resid);
737 return (EIO);
738 }
739 tlen = uio->uio_resid;
740
741 /* get a header mbuf */
742 MGETHDR(m, M_DONTWAIT, MT_DATA);
743 if (m == NULL)
744 return (ENOBUFS);
745 mlen = MHLEN;
746
747 top = 0;
748 mp = &top;
749 while (error == 0 && uio->uio_resid > 0) {
750 m->m_len = min(mlen, uio->uio_resid);
751 error = uiomove(mtod(m, void *), m->m_len, uio);
752 *mp = m;
753 mp = &m->m_next;
754 if (uio->uio_resid > 0) {
755 MGET (m, M_DONTWAIT, MT_DATA);
756 if (m == 0) {
757 error = ENOBUFS;
758 break;
759 }
760 mlen = MLEN;
761 }
762 }
763 if (error) {
764 if (top)
765 m_freem (top);
766 ifp->if_ierrors++;
767 return (error);
768 }
769
770 top->m_pkthdr.len = tlen;
771 top->m_pkthdr.rcvif = ifp;
772#ifdef MAC
773 mac_create_mbuf_from_ifnet(ifp, top);
774#endif
775
776 /* Could be unlocked read? */
777 mtx_lock(&tp->tun_mtx);
778 if (tp->tun_flags & TUN_IFHEAD) {
779 mtx_unlock(&tp->tun_mtx);
780 if (top->m_len < sizeof(family) &&
781 (top = m_pullup(top, sizeof(family))) == NULL)
782 return (ENOBUFS);
783 family = ntohl(*mtod(top, u_int32_t *));
784 m_adj(top, sizeof(family));
785 } else {
786 mtx_unlock(&tp->tun_mtx);
787 family = AF_INET;
788 }
789
790 BPF_MTAP2(ifp, &family, sizeof(family), top);
791
792 switch (family) {
793#ifdef INET
794 case AF_INET:
795 isr = NETISR_IP;
796 break;
797#endif
798#ifdef INET6
799 case AF_INET6:
800 isr = NETISR_IPV6;
801 break;
802#endif
803#ifdef IPX
804 case AF_IPX:
805 isr = NETISR_IPX;
806 break;
807#endif
808#ifdef NETATALK
809 case AF_APPLETALK:
810 isr = NETISR_ATALK2;
811 break;
812#endif
813 default:
814 m_freem(m);
815 return (EAFNOSUPPORT);
816 }
817 /* First chunk of an mbuf contains good junk */
818 if (harvest.point_to_point)
819 random_harvest(m, 16, 3, 0, RANDOM_NET);
820 ifp->if_ibytes += top->m_pkthdr.len;
821 ifp->if_ipackets++;
822 netisr_dispatch(isr, top);
823 return (0);
824}
825
826/*
827 * tunpoll - the poll interface, this is only useful on reads
828 * really. The write detect always returns true, write never blocks
829 * anyway, it either accepts the packet or drops it.
830 */
831static int
832tunpoll(struct cdev *dev, int events, struct thread *td)
833{
834 int s;
835 struct tun_softc *tp = dev->si_drv1;
836 struct ifnet *ifp = &tp->tun_if;
837 int revents = 0;
838
839 s = splimp();
840 TUNDEBUG(ifp, "tunpoll\n");
841
842 if (events & (POLLIN | POLLRDNORM)) {
843 if (ifp->if_snd.ifq_len > 0) {
844 TUNDEBUG(ifp, "tunpoll q=%d\n", ifp->if_snd.ifq_len);
845 revents |= events & (POLLIN | POLLRDNORM);
846 } else {
847 TUNDEBUG(ifp, "tunpoll waiting\n");
848 selrecord(td, &tp->tun_rsel);
849 }
850 }
851 if (events & (POLLOUT | POLLWRNORM))
852 revents |= events & (POLLOUT | POLLWRNORM);
853
854 splx(s);
855 return (revents);
856}
142 return;
143
144 if (strcmp(name, TUNNAME) == 0) {
145 u = -1;
146 } else if (dev_stdclone(name, NULL, TUNNAME, &u) != 1)
147 return; /* Don't recognise the name */
148 if (u != -1 && u > IF_MAXUNIT)
149 return; /* Unit number too high */
150
151 /* find any existing device, or allocate new unit number */
152 i = clone_create(&tunclones, &tun_cdevsw, &u, dev, 0);
153 if (i) {
154 /* No preexisting struct cdev *, create one */
155 *dev = make_dev(&tun_cdevsw, unit2minor(u),
156 UID_UUCP, GID_DIALER, 0600, "tun%d", u);
157 if (*dev != NULL)
158 (*dev)->si_flags |= SI_CHEAPCLONE;
159 }
160}
161
/*
 * Tear down one tunnel: detach from bpf and the network stack, destroy
 * the cdev, and free the softc.  Caller must already have unlinked tp
 * from tunhead (see tunmodevent's MOD_UNLOAD path).
 */
static void
tun_destroy(struct tun_softc *tp)
{
	struct cdev *dev;

	/* Unlocked read. */
	KASSERT((tp->tun_flags & TUN_OPEN) == 0,
	    ("tununits is out of sync - unit %d", tp->tun_if.if_dunit));

	dev = tp->tun_dev;
	bpfdetach(&tp->tun_if);
	if_detach(&tp->tun_if);
	destroy_dev(dev);
	mtx_destroy(&tp->tun_mtx);
	free(tp, M_TUN);
}
178
/*
 * Module event handler: set up the clone machinery on load, and tear
 * down every remaining tunnel on unload.
 */
static int
tunmodevent(module_t mod, int type, void *data)
{
	static eventhandler_tag tag;
	struct tun_softc *tp;

	switch (type) {
	case MOD_LOAD:
		mtx_init(&tunmtx, "tunmtx", NULL, MTX_DEF);
		clone_setup(&tunclones);
		tag = EVENTHANDLER_REGISTER(dev_clone, tunclone, 0, 1000);
		if (tag == NULL)
			return (ENOMEM);
		break;
	case MOD_UNLOAD:
		EVENTHANDLER_DEREGISTER(dev_clone, tag);

		mtx_lock(&tunmtx);
		while ((tp = TAILQ_FIRST(&tunhead)) != NULL) {
			TAILQ_REMOVE(&tunhead, tp, tun_list);
			/* tun_destroy() is called without tunmtx held. */
			mtx_unlock(&tunmtx);
			tun_destroy(tp);
			mtx_lock(&tunmtx);
		}
		mtx_unlock(&tunmtx);
		clone_cleanup(&tunclones);
		mtx_destroy(&tunmtx);
		break;
	}
	return 0;
}
210
/* Module glue: register if_tun in the pseudo-device initialisation pass. */
static moduledata_t tun_mod = {
	"if_tun",
	tunmodevent,
	0
};

DECLARE_MODULE(if_tun, tun_mod, SI_SUB_PSEUDO, SI_ORDER_ANY);
218
/*
 * if_start handler: a packet was queued on if_snd.  Wake a sleeping
 * reader, deliver SIGIO when async mode is enabled, and notify
 * select/poll waiters.
 */
static void
tunstart(struct ifnet *ifp)
{
	struct tun_softc *tp = ifp->if_softc;

	mtx_lock(&tp->tun_mtx);
	if (tp->tun_flags & TUN_RWAIT) {
		tp->tun_flags &= ~TUN_RWAIT;
		wakeup(tp);
	}
	if (tp->tun_flags & TUN_ASYNC && tp->tun_sigio) {
		/* Drop the softc lock before signalling. */
		mtx_unlock(&tp->tun_mtx);
		pgsigio(&tp->tun_sigio, SIGIO, 0);
	} else
		mtx_unlock(&tp->tun_mtx);
	selwakeuppri(&tp->tun_rsel, PZERO + 1);
}
236
/*
 * Allocate and initialise the softc and ifnet for a freshly cloned tun
 * device, link it onto tunhead, and attach it to the stack and to bpf.
 */
static void
tuncreate(struct cdev *dev)
{
	struct tun_softc *sc;
	struct ifnet *ifp;

	/* Once created for real, the device is no longer a cheap clone. */
	dev->si_flags &= ~SI_CHEAPCLONE;

	MALLOC(sc, struct tun_softc *, sizeof(*sc), M_TUN, M_WAITOK | M_ZERO);
	mtx_init(&sc->tun_mtx, "tun_mtx", NULL, MTX_DEF);
	sc->tun_flags = TUN_INITED;
	sc->tun_dev = dev;
	mtx_lock(&tunmtx);
	TAILQ_INSERT_TAIL(&tunhead, sc, tun_list);
	mtx_unlock(&tunmtx);

	ifp = &sc->tun_if;
	if_initname(ifp, TUNNAME, dev2unit(dev));
	ifp->if_mtu = TUNMTU;
	ifp->if_ioctl = tunifioctl;
	ifp->if_output = tunoutput;
	ifp->if_start = tunstart;
	ifp->if_flags = IFF_POINTOPOINT | IFF_MULTICAST;
	ifp->if_type = IFT_PPP;
	ifp->if_snd.ifq_maxlen = ifqmaxlen;
	ifp->if_softc = sc;
	if_attach(ifp);
	bpfattach(ifp, DLT_NULL, sizeof(u_int));
	dev->si_drv1 = sc;
}
267
/*
 * Device open: create the interface on first open of this unit, then
 * record the opening process as owner.  Returns EBUSY when another
 * process already owns the tunnel.
 */
static int
tunopen(struct cdev *dev, int flag, int mode, struct thread *td)
{
	struct ifnet *ifp;
	struct tun_softc *tp;

	/*
	 * XXXRW: Non-atomic test and set of dev->si_drv1 requires
	 * synchronization.
	 */
	tp = dev->si_drv1;
	if (!tp) {
		tuncreate(dev);
		tp = dev->si_drv1;
	}

	/*
	 * XXXRW: This use of tun_pid is subject to error due to the
	 * fact that a reference to the tunnel can live beyond the
	 * death of the process that created it.  Can we replace this
	 * with a simple busy flag?
	 */
	mtx_lock(&tp->tun_mtx);
	if (tp->tun_pid != 0 && tp->tun_pid != td->td_proc->p_pid) {
		mtx_unlock(&tp->tun_mtx);
		return (EBUSY);
	}
	tp->tun_pid = td->td_proc->p_pid;

	tp->tun_flags |= TUN_OPEN;
	mtx_unlock(&tp->tun_mtx);
	ifp = &tp->tun_if;
	TUNDEBUG(ifp, "open\n");

	return (0);
}
304
/*
 * tunclose - close the device - mark i/f down & delete
 * routing info
 */
static int
tunclose(struct cdev *dev, int foo, int bar, struct thread *td)
{
	struct tun_softc *tp;
	struct ifnet *ifp;
	int s;

	tp = dev->si_drv1;
	ifp = &tp->tun_if;

	mtx_lock(&tp->tun_mtx);
	tp->tun_flags &= ~TUN_OPEN;
	tp->tun_pid = 0;	/* release the pid-based exclusive lock */
	mtx_unlock(&tp->tun_mtx);

	/*
	 * junk all pending output
	 */
	IF_DRAIN(&ifp->if_snd);

	if (ifp->if_flags & IFF_UP) {
		s = splimp();
		if_down(ifp);
		splx(s);
	}

	if (ifp->if_flags & IFF_RUNNING) {
		struct ifaddr *ifa;

		s = splimp();
		/* find internet addresses and delete routes */
		TAILQ_FOREACH(ifa, &ifp->if_addrhead, ifa_link)
			if (ifa->ifa_addr->sa_family == AF_INET)
				/* Unlocked read. */
				rtinit(ifa, (int)RTM_DELETE,
				    tp->tun_flags & TUN_DSTADDR ? RTF_HOST : 0);
		ifp->if_flags &= ~IFF_RUNNING;
		splx(s);
	}

	funsetown(&tp->tun_sigio);
	/* Wake select/poll waiters so they notice the close. */
	selwakeuppri(&tp->tun_rsel, PZERO + 1);
	TUNDEBUG (ifp, "closed\n");
	return (0);
}
354
/*
 * Mark the interface up/running and scan its address list, recording in
 * tun_flags whether local (TUN_IASET) and destination (TUN_DSTADDR)
 * IPv4 addresses are configured.
 */
static int
tuninit(struct ifnet *ifp)
{
	struct tun_softc *tp = ifp->if_softc;
	struct ifaddr *ifa;
	int error = 0;

	TUNDEBUG(ifp, "tuninit\n");

	ifp->if_flags |= IFF_UP | IFF_RUNNING;
	getmicrotime(&ifp->if_lastchange);

	for (ifa = TAILQ_FIRST(&ifp->if_addrhead); ifa;
	     ifa = TAILQ_NEXT(ifa, ifa_link)) {
		if (ifa->ifa_addr == NULL)
			error = EFAULT;
			/* XXX: Should maybe return straight off? */
		else {
#ifdef INET
			if (ifa->ifa_addr->sa_family == AF_INET) {
				struct sockaddr_in *si;

				si = (struct sockaddr_in *)ifa->ifa_addr;
				mtx_lock(&tp->tun_mtx);
				if (si->sin_addr.s_addr)
					tp->tun_flags |= TUN_IASET;

				si = (struct sockaddr_in *)ifa->ifa_dstaddr;
				if (si && si->sin_addr.s_addr)
					tp->tun_flags |= TUN_DSTADDR;
				mtx_unlock(&tp->tun_mtx);
			}
#endif
		}
	}
	return (error);
}
392
/*
 * Process an ioctl request.
 */
static int
tunifioctl(struct ifnet *ifp, u_long cmd, caddr_t data)
{
	struct ifreq *ifr = (struct ifreq *)data;
	struct tun_softc *tp = ifp->if_softc;
	struct ifstat *ifs;
	int error = 0, s;

	s = splimp();
	switch(cmd) {
	case SIOCGIFSTATUS:
		/* Append the owning pid, if any, to the status text. */
		ifs = (struct ifstat *)data;
		mtx_lock(&tp->tun_mtx);
		if (tp->tun_pid)
			sprintf(ifs->ascii + strlen(ifs->ascii),
			    "\tOpened by PID %d\n", tp->tun_pid);
		mtx_unlock(&tp->tun_mtx);
		break;
	case SIOCSIFADDR:
		error = tuninit(ifp);
		TUNDEBUG(ifp, "address set, error=%d\n", error);
		break;
	case SIOCSIFDSTADDR:
		error = tuninit(ifp);
		TUNDEBUG(ifp, "destination address set, error=%d\n", error);
		break;
	case SIOCSIFMTU:
		ifp->if_mtu = ifr->ifr_mtu;
		TUNDEBUG(ifp, "mtu set\n");
		break;
	case SIOCSIFFLAGS:
	case SIOCADDMULTI:
	case SIOCDELMULTI:
		/* Accepted but nothing to do. */
		break;
	default:
		error = EINVAL;
	}
	splx(s);
	return (error);
}
436
437/*
438 * tunoutput - queue packets from higher level ready to put out.
439 */
440static int
441tunoutput(
442 struct ifnet *ifp,
443 struct mbuf *m0,
444 struct sockaddr *dst,
445 struct rtentry *rt)
446{
447 struct tun_softc *tp = ifp->if_softc;
448 u_short cached_tun_flags;
449 int error;
450
451 TUNDEBUG (ifp, "tunoutput\n");
452
453#ifdef MAC
454 error = mac_check_ifnet_transmit(ifp, m0);
455 if (error) {
456 m_freem(m0);
457 return (error);
458 }
459#endif
460
461 /* Could be unlocked read? */
462 mtx_lock(&tp->tun_mtx);
463 cached_tun_flags = tp->tun_flags;
464 mtx_unlock(&tp->tun_mtx);
465 if ((cached_tun_flags & TUN_READY) != TUN_READY) {
466 TUNDEBUG (ifp, "not ready 0%o\n", tp->tun_flags);
467 m_freem (m0);
468 return (EHOSTDOWN);
469 }
470
471 if ((ifp->if_flags & IFF_UP) != IFF_UP) {
472 m_freem (m0);
473 return (EHOSTDOWN);
474 }
475
476 /* BPF write needs to be handled specially */
477 if (dst->sa_family == AF_UNSPEC) {
478 dst->sa_family = *(mtod(m0, int *));
479 m0->m_len -= sizeof(int);
480 m0->m_pkthdr.len -= sizeof(int);
481 m0->m_data += sizeof(int);
482 }
483
484 if (ifp->if_bpf) {
485 uint32_t af = dst->sa_family;
486 bpf_mtap2(ifp->if_bpf, &af, sizeof(af), m0);
487 }
488
489 /* prepend sockaddr? this may abort if the mbuf allocation fails */
490 if (cached_tun_flags & TUN_LMODE) {
491 /* allocate space for sockaddr */
492 M_PREPEND(m0, dst->sa_len, M_DONTWAIT);
493
494 /* if allocation failed drop packet */
495 if (m0 == NULL) {
496 ifp->if_iqdrops++;
497 ifp->if_oerrors++;
498 return (ENOBUFS);
499 } else {
500 bcopy(dst, m0->m_data, dst->sa_len);
501 }
502 }
503
504 if (cached_tun_flags & TUN_IFHEAD) {
505 /* Prepend the address family */
506 M_PREPEND(m0, 4, M_DONTWAIT);
507
508 /* if allocation failed drop packet */
509 if (m0 == NULL) {
510 ifp->if_iqdrops++;
511 ifp->if_oerrors++;
512 return (ENOBUFS);
513 } else
514 *(u_int32_t *)m0->m_data = htonl(dst->sa_family);
515 } else {
516#ifdef INET
517 if (dst->sa_family != AF_INET)
518#endif
519 {
520 m_freem(m0);
521 return (EAFNOSUPPORT);
522 }
523 }
524
525 IFQ_HANDOFF(ifp, m0, error);
526 if (error) {
527 ifp->if_collisions++;
528 return (ENOBUFS);
529 }
530 ifp->if_opackets++;
531 return (0);
532}
533
534/*
535 * the cdevsw interface is now pretty minimal.
536 */
537static int
538tunioctl(struct cdev *dev, u_long cmd, caddr_t data, int flag, struct thread *td)
539{
540 int s;
541 int error;
542 struct tun_softc *tp = dev->si_drv1;
543 struct tuninfo *tunp;
544
545 switch (cmd) {
546 case TUNSIFINFO:
547 tunp = (struct tuninfo *)data;
548 if (tunp->mtu < IF_MINMTU)
549 return (EINVAL);
550 if (tp->tun_if.if_mtu != tunp->mtu
551 && (error = suser(td)) != 0)
552 return (error);
553 tp->tun_if.if_mtu = tunp->mtu;
554 tp->tun_if.if_type = tunp->type;
555 tp->tun_if.if_baudrate = tunp->baudrate;
556 break;
557 case TUNGIFINFO:
558 tunp = (struct tuninfo *)data;
559 tunp->mtu = tp->tun_if.if_mtu;
560 tunp->type = tp->tun_if.if_type;
561 tunp->baudrate = tp->tun_if.if_baudrate;
562 break;
563 case TUNSDEBUG:
564 tundebug = *(int *)data;
565 break;
566 case TUNGDEBUG:
567 *(int *)data = tundebug;
568 break;
569 case TUNSLMODE:
570 mtx_lock(&tp->tun_mtx);
571 if (*(int *)data) {
572 tp->tun_flags |= TUN_LMODE;
573 tp->tun_flags &= ~TUN_IFHEAD;
574 } else
575 tp->tun_flags &= ~TUN_LMODE;
576 mtx_unlock(&tp->tun_mtx);
577 break;
578 case TUNSIFHEAD:
579 mtx_lock(&tp->tun_mtx);
580 if (*(int *)data) {
581 tp->tun_flags |= TUN_IFHEAD;
582 tp->tun_flags &= ~TUN_LMODE;
583 } else
584 tp->tun_flags &= ~TUN_IFHEAD;
585 mtx_unlock(&tp->tun_mtx);
586 break;
587 case TUNGIFHEAD:
588 /* Could be unlocked read? */
589 mtx_lock(&tp->tun_mtx);
590 *(int *)data = (tp->tun_flags & TUN_IFHEAD) ? 1 : 0;
591 mtx_unlock(&tp->tun_mtx);
592 break;
593 case TUNSIFMODE:
594 /* deny this if UP */
595 if (tp->tun_if.if_flags & IFF_UP)
596 return(EBUSY);
597
598 switch (*(int *)data & ~IFF_MULTICAST) {
599 case IFF_POINTOPOINT:
600 case IFF_BROADCAST:
601 tp->tun_if.if_flags &=
602 ~(IFF_BROADCAST|IFF_POINTOPOINT|IFF_MULTICAST);
603 tp->tun_if.if_flags |= *(int *)data;
604 break;
605 default:
606 return(EINVAL);
607 }
608 break;
609 case TUNSIFPID:
610 mtx_lock(&tp->tun_mtx);
611 tp->tun_pid = curthread->td_proc->p_pid;
612 mtx_unlock(&tp->tun_mtx);
613 break;
614 case FIONBIO:
615 break;
616 case FIOASYNC:
617 mtx_lock(&tp->tun_mtx);
618 if (*(int *)data)
619 tp->tun_flags |= TUN_ASYNC;
620 else
621 tp->tun_flags &= ~TUN_ASYNC;
622 mtx_unlock(&tp->tun_mtx);
623 break;
624 case FIONREAD:
625 s = splimp();
626 if (tp->tun_if.if_snd.ifq_head) {
627 struct mbuf *mb = tp->tun_if.if_snd.ifq_head;
628 for( *(int *)data = 0; mb != 0; mb = mb->m_next)
629 *(int *)data += mb->m_len;
630 } else
631 *(int *)data = 0;
632 splx(s);
633 break;
634 case FIOSETOWN:
635 return (fsetown(*(int *)data, &tp->tun_sigio));
636
637 case FIOGETOWN:
638 *(int *)data = fgetown(&tp->tun_sigio);
639 return (0);
640
641 /* This is deprecated, FIOSETOWN should be used instead. */
642 case TIOCSPGRP:
643 return (fsetown(-(*(int *)data), &tp->tun_sigio));
644
645 /* This is deprecated, FIOGETOWN should be used instead. */
646 case TIOCGPGRP:
647 *(int *)data = -fgetown(&tp->tun_sigio);
648 return (0);
649
650 default:
651 return (ENOTTY);
652 }
653 return (0);
654}
655
656/*
657 * The cdevsw read interface - reads a packet at a time, or at
658 * least as much of a packet as can be read.
659 */
660static int
661tunread(struct cdev *dev, struct uio *uio, int flag)
662{
663 struct tun_softc *tp = dev->si_drv1;
664 struct ifnet *ifp = &tp->tun_if;
665 struct mbuf *m;
666 int error=0, len, s;
667
668 TUNDEBUG (ifp, "read\n");
669 mtx_lock(&tp->tun_mtx);
670 if ((tp->tun_flags & TUN_READY) != TUN_READY) {
671 mtx_unlock(&tp->tun_mtx);
672 TUNDEBUG (ifp, "not ready 0%o\n", tp->tun_flags);
673 return (EHOSTDOWN);
674 }
675
676 tp->tun_flags &= ~TUN_RWAIT;
677 mtx_unlock(&tp->tun_mtx);
678
679 s = splimp();
680 do {
681 IF_DEQUEUE(&ifp->if_snd, m);
682 if (m == NULL) {
683 if (flag & IO_NDELAY) {
684 splx(s);
685 return (EWOULDBLOCK);
686 }
687 mtx_lock(&tp->tun_mtx);
688 tp->tun_flags |= TUN_RWAIT;
689 mtx_unlock(&tp->tun_mtx);
690 if((error = tsleep(tp, PCATCH | (PZERO + 1),
691 "tunread", 0)) != 0) {
692 splx(s);
693 return (error);
694 }
695 }
696 } while (m == NULL);
697 splx(s);
698
699 while (m && uio->uio_resid > 0 && error == 0) {
700 len = min(uio->uio_resid, m->m_len);
701 if (len != 0)
702 error = uiomove(mtod(m, void *), len, uio);
703 m = m_free(m);
704 }
705
706 if (m) {
707 TUNDEBUG(ifp, "Dropping mbuf\n");
708 m_freem(m);
709 }
710 return (error);
711}
712
713/*
714 * the cdevsw write interface - an atomic write is a packet - or else!
715 */
716static int
717tunwrite(struct cdev *dev, struct uio *uio, int flag)
718{
719 struct tun_softc *tp = dev->si_drv1;
720 struct ifnet *ifp = &tp->tun_if;
721 struct mbuf *top, **mp, *m;
722 int error=0, tlen, mlen;
723 uint32_t family;
724 int isr;
725
726 TUNDEBUG(ifp, "tunwrite\n");
727
728 if ((ifp->if_flags & IFF_UP) != IFF_UP)
729 /* ignore silently */
730 return (0);
731
732 if (uio->uio_resid == 0)
733 return (0);
734
735 if (uio->uio_resid < 0 || uio->uio_resid > TUNMRU) {
736 TUNDEBUG(ifp, "len=%d!\n", uio->uio_resid);
737 return (EIO);
738 }
739 tlen = uio->uio_resid;
740
741 /* get a header mbuf */
742 MGETHDR(m, M_DONTWAIT, MT_DATA);
743 if (m == NULL)
744 return (ENOBUFS);
745 mlen = MHLEN;
746
747 top = 0;
748 mp = &top;
749 while (error == 0 && uio->uio_resid > 0) {
750 m->m_len = min(mlen, uio->uio_resid);
751 error = uiomove(mtod(m, void *), m->m_len, uio);
752 *mp = m;
753 mp = &m->m_next;
754 if (uio->uio_resid > 0) {
755 MGET (m, M_DONTWAIT, MT_DATA);
756 if (m == 0) {
757 error = ENOBUFS;
758 break;
759 }
760 mlen = MLEN;
761 }
762 }
763 if (error) {
764 if (top)
765 m_freem (top);
766 ifp->if_ierrors++;
767 return (error);
768 }
769
770 top->m_pkthdr.len = tlen;
771 top->m_pkthdr.rcvif = ifp;
772#ifdef MAC
773 mac_create_mbuf_from_ifnet(ifp, top);
774#endif
775
776 /* Could be unlocked read? */
777 mtx_lock(&tp->tun_mtx);
778 if (tp->tun_flags & TUN_IFHEAD) {
779 mtx_unlock(&tp->tun_mtx);
780 if (top->m_len < sizeof(family) &&
781 (top = m_pullup(top, sizeof(family))) == NULL)
782 return (ENOBUFS);
783 family = ntohl(*mtod(top, u_int32_t *));
784 m_adj(top, sizeof(family));
785 } else {
786 mtx_unlock(&tp->tun_mtx);
787 family = AF_INET;
788 }
789
790 BPF_MTAP2(ifp, &family, sizeof(family), top);
791
792 switch (family) {
793#ifdef INET
794 case AF_INET:
795 isr = NETISR_IP;
796 break;
797#endif
798#ifdef INET6
799 case AF_INET6:
800 isr = NETISR_IPV6;
801 break;
802#endif
803#ifdef IPX
804 case AF_IPX:
805 isr = NETISR_IPX;
806 break;
807#endif
808#ifdef NETATALK
809 case AF_APPLETALK:
810 isr = NETISR_ATALK2;
811 break;
812#endif
813 default:
814 m_freem(m);
815 return (EAFNOSUPPORT);
816 }
817 /* First chunk of an mbuf contains good junk */
818 if (harvest.point_to_point)
819 random_harvest(m, 16, 3, 0, RANDOM_NET);
820 ifp->if_ibytes += top->m_pkthdr.len;
821 ifp->if_ipackets++;
822 netisr_dispatch(isr, top);
823 return (0);
824}
825
826/*
827 * tunpoll - the poll interface, this is only useful on reads
828 * really. The write detect always returns true, write never blocks
829 * anyway, it either accepts the packet or drops it.
830 */
831static int
832tunpoll(struct cdev *dev, int events, struct thread *td)
833{
834 int s;
835 struct tun_softc *tp = dev->si_drv1;
836 struct ifnet *ifp = &tp->tun_if;
837 int revents = 0;
838
839 s = splimp();
840 TUNDEBUG(ifp, "tunpoll\n");
841
842 if (events & (POLLIN | POLLRDNORM)) {
843 if (ifp->if_snd.ifq_len > 0) {
844 TUNDEBUG(ifp, "tunpoll q=%d\n", ifp->if_snd.ifq_len);
845 revents |= events & (POLLIN | POLLRDNORM);
846 } else {
847 TUNDEBUG(ifp, "tunpoll waiting\n");
848 selrecord(td, &tp->tun_rsel);
849 }
850 }
851 if (events & (POLLOUT | POLLWRNORM))
852 revents |= events & (POLLOUT | POLLWRNORM);
853
854 splx(s);
855 return (revents);
856}