/*-
 * Copyright (c) 2004-2010 University of Zagreb
 * Copyright (c) 2007-2008 FreeBSD Foundation
 *
 * This software was developed by the University of Zagreb and the
 * FreeBSD Foundation under sponsorship by the Stichting NLnet and the
 * FreeBSD Foundation.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $FreeBSD: head/sys/netgraph/ng_pipe.c 215800 2010-11-24 16:02:58Z zec $
 */

/*
 * This node permits simple traffic shaping by emulating bandwidth
 * and delay, as well as random packet losses.
 * The node has two hooks, upper and lower.  Traffic flowing from the upper
 * to the lower hook is referred to as downstream, and vice versa.  Parameters
 * for both directions can be set separately, except for delay.
 */
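
/*
 * Editor's sketch (not part of the original source): a node of this type
 * is normally created and configured from userland with ngctl(8).  The
 * commands below are only a rough illustration - the hook wiring and the
 * numeric values are made up, and ng_pipe(4) / ngctl(8) are authoritative
 * for the exact syntax:
 *
 *	ngctl mkpeer . pipe tmp upper
 *	ngctl msg .:tmp setcfg { bandwidth=1000000 delay=20000 }
 *
 * In real use both hooks (upper and lower) are connected into a data
 * path, e.g. between the upper and lower hooks of an ng_ether(4) node,
 * and "setcfg" maps to the NGM_PIPE_SET_CFG message handled below.
 */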


#include <sys/param.h>
#include <sys/errno.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/malloc.h>
#include <sys/mbuf.h>
#include <sys/time.h>

#include <vm/uma.h>

#include <net/vnet.h>

#include <netinet/in.h>
#include <netinet/in_systm.h>
#include <netinet/ip.h>

#include <netgraph/ng_message.h>
#include <netgraph/netgraph.h>
#include <netgraph/ng_parse.h>
#include <netgraph/ng_pipe.h>

static MALLOC_DEFINE(M_NG_PIPE, "ng_pipe", "ng_pipe");

/* Packet header struct */
struct ngp_hdr {
	TAILQ_ENTRY(ngp_hdr)	ngp_link;	/* next pkt in queue */
	struct timeval		when;		/* this packet's due time */
	struct mbuf		*m;		/* ptr to the packet data */
};
TAILQ_HEAD(p_head, ngp_hdr);

/* FIFO queue struct */
struct ngp_fifo {
	TAILQ_ENTRY(ngp_fifo)	fifo_le;	/* list of active queues only */
	struct p_head		packet_head;	/* FIFO queue head */
	u_int32_t		hash;		/* flow signature */
	struct timeval		vtime;		/* virtual time, for WFQ */
	u_int32_t		rr_deficit;	/* for DRR */
	u_int32_t		packets;	/* # of packets in this queue */
};

/* Per hook info */
struct hookinfo {
	hook_p			hook;
	int			noqueue;	/* bypass any processing */
	TAILQ_HEAD(, ngp_fifo)	fifo_head;	/* FIFO queues */
	TAILQ_HEAD(, ngp_hdr)	qout_head;	/* delay queue head */
	struct timeval		qin_utime;
	struct ng_pipe_hookcfg	cfg;
	struct ng_pipe_hookrun	run;
	struct ng_pipe_hookstat	stats;
	uint64_t		*ber_p;		/* loss_p(BER,psize) map */
};

/* Per node info */
struct node_priv {
	u_int64_t		delay;
	u_int32_t		overhead;
	u_int32_t		header_offset;
	struct hookinfo		lower;
	struct hookinfo		upper;
	struct callout		timer;
	int			timer_scheduled;
};
typedef struct node_priv *priv_p;

/* Macro for calculating the virtual time for packet dequeueing in WFQ */
#define FIFO_VTIME_SORT(plen)						\
	if (hinfo->cfg.wfq && hinfo->cfg.bandwidth) {			\
		ngp_f->vtime.tv_usec = now->tv_usec + ((uint64_t) (plen) \
			+ priv->overhead) * hinfo->run.fifo_queues *	\
			8000000 / hinfo->cfg.bandwidth;			\
		ngp_f->vtime.tv_sec = now->tv_sec +			\
			ngp_f->vtime.tv_usec / 1000000;			\
		ngp_f->vtime.tv_usec = ngp_f->vtime.tv_usec % 1000000;	\
		TAILQ_FOREACH(ngp_f1, &hinfo->fifo_head, fifo_le)	\
			if (ngp_f1->vtime.tv_sec > ngp_f->vtime.tv_sec || \
			    (ngp_f1->vtime.tv_sec == ngp_f->vtime.tv_sec && \
			    ngp_f1->vtime.tv_usec > ngp_f->vtime.tv_usec)) \
				break;					\
		if (ngp_f1 == NULL)					\
			TAILQ_INSERT_TAIL(&hinfo->fifo_head, ngp_f, fifo_le); \
		else							\
			TAILQ_INSERT_BEFORE(ngp_f1, ngp_f, fifo_le);	\
	} else								\
		TAILQ_INSERT_TAIL(&hinfo->fifo_head, ngp_f, fifo_le);

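/*
 * With WFQ enabled, FIFO_VTIME_SORT() charges each flow a virtual
 * finish time of (plen + overhead) * fifo_queues * 8000000 / bandwidth
 * microseconds and keeps the list of active queues sorted by that time.
 * For example (illustrative figures, not from the original source): a
 * 1500-byte packet with 24 bytes of overhead, 3 active queues and a
 * 10 Mbit/s bandwidth adds (1500 + 24) * 3 * 8000000 / 10000000 = 3657
 * microseconds (integer division) to the current time.
 */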

static void	parse_cfg(struct ng_pipe_hookcfg *, struct ng_pipe_hookcfg *,
			struct hookinfo *, priv_p);
static void	pipe_dequeue(struct hookinfo *, struct timeval *);
static void	ngp_callout(node_p, hook_p, void *, int);
static int	ngp_modevent(module_t, int, void *);

/* zone for storing ngp_hdr-s */
static uma_zone_t ngp_zone;

/* Netgraph methods */
static ng_constructor_t	ngp_constructor;
static ng_rcvmsg_t	ngp_rcvmsg;
static ng_shutdown_t	ngp_shutdown;
static ng_newhook_t	ngp_newhook;
static ng_rcvdata_t	ngp_rcvdata;
static ng_disconnect_t	ngp_disconnect;

/* Parse type for struct ng_pipe_hookstat */
static const struct ng_parse_struct_field
	ng_pipe_hookstat_type_fields[] = NG_PIPE_HOOKSTAT_INFO;
static const struct ng_parse_type ng_pipe_hookstat_type = {
	&ng_parse_struct_type,
	&ng_pipe_hookstat_type_fields
};

/* Parse type for struct ng_pipe_stats */
static const struct ng_parse_struct_field ng_pipe_stats_type_fields[] =
	NG_PIPE_STATS_INFO(&ng_pipe_hookstat_type);
static const struct ng_parse_type ng_pipe_stats_type = {
	&ng_parse_struct_type,
	&ng_pipe_stats_type_fields
};

/* Parse type for struct ng_pipe_hookrun */
static const struct ng_parse_struct_field
	ng_pipe_hookrun_type_fields[] = NG_PIPE_HOOKRUN_INFO;
static const struct ng_parse_type ng_pipe_hookrun_type = {
	&ng_parse_struct_type,
	&ng_pipe_hookrun_type_fields
};

/* Parse type for struct ng_pipe_run */
static const struct ng_parse_struct_field
	ng_pipe_run_type_fields[] = NG_PIPE_RUN_INFO(&ng_pipe_hookrun_type);
static const struct ng_parse_type ng_pipe_run_type = {
	&ng_parse_struct_type,
	&ng_pipe_run_type_fields
};

/* Parse type for struct ng_pipe_hookcfg */
static const struct ng_parse_struct_field
	ng_pipe_hookcfg_type_fields[] = NG_PIPE_HOOKCFG_INFO;
static const struct ng_parse_type ng_pipe_hookcfg_type = {
	&ng_parse_struct_type,
	&ng_pipe_hookcfg_type_fields
};

/* Parse type for struct ng_pipe_cfg */
static const struct ng_parse_struct_field
	ng_pipe_cfg_type_fields[] = NG_PIPE_CFG_INFO(&ng_pipe_hookcfg_type);
static const struct ng_parse_type ng_pipe_cfg_type = {
	&ng_parse_struct_type,
	&ng_pipe_cfg_type_fields
};

/* List of commands and how to convert arguments to/from ASCII */
static const struct ng_cmdlist ngp_cmds[] = {
	{
		.cookie =	NGM_PIPE_COOKIE,
		.cmd =		NGM_PIPE_GET_STATS,
		.name =		"getstats",
		.respType =	&ng_pipe_stats_type
	},
	{
		.cookie =	NGM_PIPE_COOKIE,
		.cmd =		NGM_PIPE_CLR_STATS,
		.name =		"clrstats"
	},
	{
		.cookie =	NGM_PIPE_COOKIE,
		.cmd =		NGM_PIPE_GETCLR_STATS,
		.name =		"getclrstats",
		.respType =	&ng_pipe_stats_type
	},
	{
		.cookie =	NGM_PIPE_COOKIE,
		.cmd =		NGM_PIPE_GET_RUN,
		.name =		"getrun",
		.respType =	&ng_pipe_run_type
	},
	{
		.cookie =	NGM_PIPE_COOKIE,
		.cmd =		NGM_PIPE_GET_CFG,
		.name =		"getcfg",
		.respType =	&ng_pipe_cfg_type
	},
	{
		.cookie =	NGM_PIPE_COOKIE,
		.cmd =		NGM_PIPE_SET_CFG,
		.name =		"setcfg",
		.mesgType =	&ng_pipe_cfg_type,
	},
	{ 0 }
};

/* Netgraph type descriptor */
static struct ng_type ng_pipe_typestruct = {
	.version =	NG_ABI_VERSION,
	.name =		NG_PIPE_NODE_TYPE,
	.mod_event =	ngp_modevent,
	.constructor =	ngp_constructor,
	.shutdown =	ngp_shutdown,
	.rcvmsg =	ngp_rcvmsg,
	.newhook =	ngp_newhook,
	.rcvdata =	ngp_rcvdata,
	.disconnect =	ngp_disconnect,
	.cmdlist =	ngp_cmds
};
NETGRAPH_INIT(pipe, &ng_pipe_typestruct);

/* Node constructor */
static int
ngp_constructor(node_p node)
{
	priv_p priv;

	priv = malloc(sizeof(*priv), M_NG_PIPE, M_ZERO | M_NOWAIT);
	if (priv == NULL)
		return (ENOMEM);
	NG_NODE_SET_PRIVATE(node, priv);

	/* Mark node as single-threaded */
	NG_NODE_FORCE_WRITER(node);

	ng_callout_init(&priv->timer);

	return (0);
}

/* Add a hook */
static int
ngp_newhook(node_p node, hook_p hook, const char *name)
{
	const priv_p priv = NG_NODE_PRIVATE(node);
	struct hookinfo *hinfo;

	if (strcmp(name, NG_PIPE_HOOK_UPPER) == 0) {
		bzero(&priv->upper, sizeof(priv->upper));
		priv->upper.hook = hook;
		NG_HOOK_SET_PRIVATE(hook, &priv->upper);
	} else if (strcmp(name, NG_PIPE_HOOK_LOWER) == 0) {
		bzero(&priv->lower, sizeof(priv->lower));
		priv->lower.hook = hook;
		NG_HOOK_SET_PRIVATE(hook, &priv->lower);
	} else
		return (EINVAL);

	/* Load non-zero initial cfg values */
	hinfo = NG_HOOK_PRIVATE(hook);
	hinfo->cfg.qin_size_limit = 50;
	hinfo->cfg.fifo = 1;
	hinfo->cfg.droptail = 1;
	TAILQ_INIT(&hinfo->fifo_head);
	TAILQ_INIT(&hinfo->qout_head);
	return (0);
}

/* Receive a control message */
static int
ngp_rcvmsg(node_p node, item_p item, hook_p lasthook)
{
	const priv_p priv = NG_NODE_PRIVATE(node);
	struct ng_mesg *resp = NULL;
	struct ng_mesg *msg;
	struct ng_pipe_stats *stats;
	struct ng_pipe_run *run;
	struct ng_pipe_cfg *cfg;
	int error = 0;

	NGI_GET_MSG(item, msg);
	switch (msg->header.typecookie) {
	case NGM_PIPE_COOKIE:
		switch (msg->header.cmd) {
		case NGM_PIPE_GET_STATS:
		case NGM_PIPE_CLR_STATS:
		case NGM_PIPE_GETCLR_STATS:
			if (msg->header.cmd != NGM_PIPE_CLR_STATS) {
				NG_MKRESPONSE(resp, msg,
				    sizeof(*stats), M_NOWAIT);
				if (resp == NULL) {
					error = ENOMEM;
					break;
				}
				stats = (struct ng_pipe_stats *) resp->data;
				bcopy(&priv->upper.stats, &stats->downstream,
				    sizeof(stats->downstream));
				bcopy(&priv->lower.stats, &stats->upstream,
				    sizeof(stats->upstream));
			}
			if (msg->header.cmd != NGM_PIPE_GET_STATS) {
				bzero(&priv->upper.stats,
				    sizeof(priv->upper.stats));
				bzero(&priv->lower.stats,
				    sizeof(priv->lower.stats));
			}
			break;
		case NGM_PIPE_GET_RUN:
			NG_MKRESPONSE(resp, msg, sizeof(*run), M_NOWAIT);
			if (resp == NULL) {
				error = ENOMEM;
				break;
			}
			run = (struct ng_pipe_run *) resp->data;
			bcopy(&priv->upper.run, &run->downstream,
				sizeof(run->downstream));
			bcopy(&priv->lower.run, &run->upstream,
				sizeof(run->upstream));
			break;
		case NGM_PIPE_GET_CFG:
			NG_MKRESPONSE(resp, msg, sizeof(*cfg), M_NOWAIT);
			if (resp == NULL) {
				error = ENOMEM;
				break;
			}
			cfg = (struct ng_pipe_cfg *) resp->data;
			bcopy(&priv->upper.cfg, &cfg->downstream,
				sizeof(cfg->downstream));
			bcopy(&priv->lower.cfg, &cfg->upstream,
				sizeof(cfg->upstream));
			cfg->delay = priv->delay;
			cfg->overhead = priv->overhead;
			cfg->header_offset = priv->header_offset;
			if (cfg->upstream.bandwidth ==
			    cfg->downstream.bandwidth) {
				cfg->bandwidth = cfg->upstream.bandwidth;
				cfg->upstream.bandwidth = 0;
				cfg->downstream.bandwidth = 0;
			} else
				cfg->bandwidth = 0;
			break;
		case NGM_PIPE_SET_CFG:
			cfg = (struct ng_pipe_cfg *) msg->data;
			if (msg->header.arglen != sizeof(*cfg)) {
				error = EINVAL;
				break;
			}

			if (cfg->delay == -1)
				priv->delay = 0;
			else if (cfg->delay > 0 && cfg->delay < 10000000)
				priv->delay = cfg->delay;

			if (cfg->bandwidth == -1) {
				priv->upper.cfg.bandwidth = 0;
				priv->lower.cfg.bandwidth = 0;
				priv->overhead = 0;
			} else if (cfg->bandwidth >= 100 &&
			    cfg->bandwidth <= 1000000000) {
				priv->upper.cfg.bandwidth = cfg->bandwidth;
				priv->lower.cfg.bandwidth = cfg->bandwidth;
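				/*
				 * Editor's assumption (not in the original
				 * source): 8+4+12 below presumably models
				 * per-frame Ethernet framing costs -
				 * preamble/SFD (8), FCS (4) and inter-frame
				 * gap (12) bytes - while 10 is a rough
				 * HDLC-style framing estimate for slower
				 * links.
				 */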
				if (cfg->bandwidth >= 10000000)
					priv->overhead = 8+4+12; /* Ethernet */
				else
					priv->overhead = 10; /* HDLC */
			}

			if (cfg->overhead == -1)
				priv->overhead = 0;
			else if (cfg->overhead > 0 &&
			    cfg->overhead < MAX_OHSIZE)
				priv->overhead = cfg->overhead;

			if (cfg->header_offset == -1)
				priv->header_offset = 0;
			else if (cfg->header_offset > 0 &&
			    cfg->header_offset < 64)
				priv->header_offset = cfg->header_offset;

			parse_cfg(&priv->upper.cfg, &cfg->downstream,
			    &priv->upper, priv);
			parse_cfg(&priv->lower.cfg, &cfg->upstream,
			    &priv->lower, priv);
			break;
		default:
			error = EINVAL;
			break;
		}
		break;
	default:
		error = EINVAL;
		break;
	}
	NG_RESPOND_MSG(error, node, item, resp);
	NG_FREE_MSG(msg);

	return (error);
}

static void
parse_cfg(struct ng_pipe_hookcfg *current, struct ng_pipe_hookcfg *new,
	struct hookinfo *hinfo, priv_p priv)
{

	if (new->ber == -1) {
		current->ber = 0;
		if (hinfo->ber_p) {
			free(hinfo->ber_p, M_NG_PIPE);
			hinfo->ber_p = NULL;
		}
	} else if (new->ber >= 1 && new->ber <= 1000000000000) {
		static const uint64_t one = 0x1000000000000; /* = 2^48 */
		uint64_t p0, p;
		uint32_t fsize, i;

		if (hinfo->ber_p == NULL)
			hinfo->ber_p =
			    malloc((MAX_FSIZE + MAX_OHSIZE) * sizeof(uint64_t),
			    M_NG_PIPE, M_NOWAIT);
		current->ber = new->ber;

		/*
		 * For the given BER and each frame size N (in bytes),
		 * calculate the probability P_OK that the frame is clean:
		 *
		 * P_OK(BER,N) = (1 - 1/BER)^(N*8)
		 *
		 * We use a 64-bit fixed-point format with the binary point
		 * positioned between bits 47 and 48.
		 */
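		/*
		 * The loop below computes p * p0 / 2^48 without a 128-bit
		 * intermediate by splitting p0 into three 16-bit limbs
		 * (lo, mid, hi), using
		 *
		 *   p * p0 >> 48 == (p * lo >> 48) + (p * mid >> 32) +
		 *	(p * hi >> 16)
		 *
		 * where p0 == lo + (mid << 16) + (hi << 32).  The eight
		 * multiplications per iteration of the outer loop account
		 * for the 8 bits in each additional byte of frame size.
		 */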
		p0 = one - one / new->ber;
		p = one;
		for (fsize = 0; fsize < MAX_FSIZE + MAX_OHSIZE; fsize++) {
			hinfo->ber_p[fsize] = p;
			for (i = 0; i < 8; i++)
				p = (p * (p0 & 0xffff) >> 48) +
				    (p * ((p0 >> 16) & 0xffff) >> 32) +
				    (p * (p0 >> 32) >> 16);
		}
	}

	if (new->qin_size_limit == -1)
		current->qin_size_limit = 0;
	else if (new->qin_size_limit >= 5)
		current->qin_size_limit = new->qin_size_limit;

	if (new->qout_size_limit == -1)
		current->qout_size_limit = 0;
	else if (new->qout_size_limit >= 5)
		current->qout_size_limit = new->qout_size_limit;

	if (new->duplicate == -1)
		current->duplicate = 0;
	else if (new->duplicate > 0 && new->duplicate <= 50)
		current->duplicate = new->duplicate;

	if (new->fifo) {
		current->fifo = 1;
		current->wfq = 0;
		current->drr = 0;
	}

	if (new->wfq) {
		current->fifo = 0;
		current->wfq = 1;
		current->drr = 0;
	}

	if (new->drr) {
		current->fifo = 0;
		current->wfq = 0;
		/* DRR quantum */
		if (new->drr >= 32)
			current->drr = new->drr;
		else
			current->drr = 2048;		/* default quantum */
	}

	if (new->droptail) {
		current->droptail = 1;
		current->drophead = 0;
	}

	if (new->drophead) {
		current->droptail = 0;
		current->drophead = 1;
	}

	if (new->bandwidth == -1) {
		current->bandwidth = 0;
		current->fifo = 1;
		current->wfq = 0;
		current->drr = 0;
	} else if (new->bandwidth >= 100 && new->bandwidth <= 1000000000)
		current->bandwidth = new->bandwidth;

	if (current->bandwidth | priv->delay |
	    current->duplicate | current->ber)
		hinfo->noqueue = 0;
	else
		hinfo->noqueue = 1;
}

/*
 * Compute a hash signature for a packet.  This function suffers from the
 * NIH syndrome, so it would probably be wise to look at what other folks
 * have found to be a good and efficient IP hash function...
 */
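/*
 * The hash spreads the IPv4 source and destination addresses over a
 * 64-bit value using several different shifts and then folds the upper
 * 32 bits onto the lower 32, so all packets of the same src/dst pair
 * consistently map to the same FIFO queue.  Frames that are not plain
 * IPv4 (or are too short, or carry IP options) fall back to hash 0.
 */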
static int
ip_hash(struct mbuf *m, int offset)
{
	u_int64_t i;
	struct ip *ip = (struct ip *)(mtod(m, u_char *) + offset);

	if (m->m_len < sizeof(struct ip) + offset ||
	    ip->ip_v != 4 || ip->ip_hl << 2 != sizeof(struct ip))
		return 0;

	i = ((u_int64_t) ip->ip_src.s_addr ^
	    ((u_int64_t) ip->ip_src.s_addr << 13) ^
	    ((u_int64_t) ip->ip_dst.s_addr << 7) ^
	    ((u_int64_t) ip->ip_dst.s_addr << 19));
	return (i ^ (i >> 32));
}

/*
 * Receive data on a hook - in both the upstream and downstream direction.
 * We put the frame on the inbound queue and try to initiate the dequeueing
 * sequence immediately.  If the inbound queue is full, one frame is
 * discarded according to the dropping policy (from the head or from the
 * tail of the queue).
 */
static int
ngp_rcvdata(hook_p hook, item_p item)
{
	struct hookinfo *const hinfo = NG_HOOK_PRIVATE(hook);
	const priv_p priv = NG_NODE_PRIVATE(NG_HOOK_NODE(hook));
	struct timeval uuptime;
	struct timeval *now = &uuptime;
	struct ngp_fifo *ngp_f = NULL, *ngp_f1;
	struct ngp_hdr *ngp_h = NULL;
	struct mbuf *m;
	int hash, plen;
	int error = 0;

	/*
	 * Shortcut from the inbound to the outbound hook when none of
	 * bandwidth, delay, BER or duplication probability is
	 * configured, and there are no queued frames left to drain.
	 */
	if (hinfo->run.qin_frames == 0 && hinfo->run.qout_frames == 0 &&
	    hinfo->noqueue) {
		struct hookinfo *dest;
		if (hinfo == &priv->lower)
			dest = &priv->upper;
		else
			dest = &priv->lower;

		/* Send the frame. */
		plen = NGI_M(item)->m_pkthdr.len;
		NG_FWD_ITEM_HOOK(error, item, dest->hook);

		/* Update stats. */
		if (error) {
			hinfo->stats.out_disc_frames++;
			hinfo->stats.out_disc_octets += plen;
		} else {
			hinfo->stats.fwd_frames++;
			hinfo->stats.fwd_octets += plen;
		}

		return (error);
	}

	microuptime(now);

	/*
	 * If this was an empty queue, update service deadline time.
	 */
	if (hinfo->run.qin_frames == 0) {
		struct timeval *when = &hinfo->qin_utime;
		if (when->tv_sec < now->tv_sec || (when->tv_sec == now->tv_sec
		    && when->tv_usec < now->tv_usec)) {
			when->tv_sec = now->tv_sec;
			when->tv_usec = now->tv_usec;
		}
	}

	/* Populate the packet header */
	ngp_h = uma_zalloc(ngp_zone, M_NOWAIT);
	KASSERT((ngp_h != NULL), ("ngp_h zalloc failed (1)"));
	NGI_GET_M(item, m);
	KASSERT(m != NULL, ("NGI_GET_M failed"));
	ngp_h->m = m;
	NG_FREE_ITEM(item);

	if (hinfo->cfg.fifo)
		hash = 0;	/* all packets go into a single FIFO queue */
	else
		hash = ip_hash(m, priv->header_offset);

	/* Find the appropriate FIFO queue for the packet and enqueue it */
	TAILQ_FOREACH(ngp_f, &hinfo->fifo_head, fifo_le)
		if (hash == ngp_f->hash)
			break;
	if (ngp_f == NULL) {
		ngp_f = uma_zalloc(ngp_zone, M_NOWAIT);
		KASSERT(ngp_f != NULL, ("ngp_f zalloc failed (2)"));
		TAILQ_INIT(&ngp_f->packet_head);
		ngp_f->hash = hash;
		ngp_f->packets = 1;
		ngp_f->rr_deficit = hinfo->cfg.drr;	/* DRR quantum */
		hinfo->run.fifo_queues++;
		TAILQ_INSERT_TAIL(&ngp_f->packet_head, ngp_h, ngp_link);
		FIFO_VTIME_SORT(m->m_pkthdr.len);
	} else {
		TAILQ_INSERT_TAIL(&ngp_f->packet_head, ngp_h, ngp_link);
		ngp_f->packets++;
	}
	hinfo->run.qin_frames++;
	hinfo->run.qin_octets += m->m_pkthdr.len;

	/* Discard a frame if inbound queue limit has been reached */
	if (hinfo->run.qin_frames > hinfo->cfg.qin_size_limit) {
		struct mbuf *m1;
		int longest = 0;

		/* Find the longest queue */
		TAILQ_FOREACH(ngp_f1, &hinfo->fifo_head, fifo_le)
			if (ngp_f1->packets > longest) {
				longest = ngp_f1->packets;
				ngp_f = ngp_f1;
			}

		/* Drop a frame from the queue head/tail, depending on cfg */
		if (hinfo->cfg.drophead)
			ngp_h = TAILQ_FIRST(&ngp_f->packet_head);
		else
			ngp_h = TAILQ_LAST(&ngp_f->packet_head, p_head);
		TAILQ_REMOVE(&ngp_f->packet_head, ngp_h, ngp_link);
		m1 = ngp_h->m;
		uma_zfree(ngp_zone, ngp_h);
		hinfo->run.qin_octets -= m1->m_pkthdr.len;
		hinfo->stats.in_disc_octets += m1->m_pkthdr.len;
		m_freem(m1);
		if (--(ngp_f->packets) == 0) {
			TAILQ_REMOVE(&hinfo->fifo_head, ngp_f, fifo_le);
			uma_zfree(ngp_zone, ngp_f);
			hinfo->run.fifo_queues--;
		}
		hinfo->run.qin_frames--;
		hinfo->stats.in_disc_frames++;
	}

	/*
	 * Try to start the dequeuing process immediately.
	 */
	pipe_dequeue(hinfo, now);

	return (0);
}


/*
 * Dequeueing sequence - we basically do the following:
 *  1) Try to extract a frame from the inbound (bandwidth) queue;
 *  2) In accordance with the configured BER, randomly discard the frame;
 *  3) If the frame survives the BER check, tag it with its due time and
 *     move it to the outbound (delay) queue;
 *  4) Repeat from 1) until the bandwidth quota for this timeslice is
 *     reached, or the inbound queue is drained completely;
 *  5) Dequeue frames from the outbound queue and send them downstream until
 *     the outbound queue is drained completely, or the next frame in the
 *     queue is not yet due to be dequeued.
 */
static void
pipe_dequeue(struct hookinfo *hinfo, struct timeval *now)
{
	static uint64_t rand, oldrand;
	const node_p node = NG_HOOK_NODE(hinfo->hook);
	const priv_p priv = NG_NODE_PRIVATE(node);
	struct hookinfo *dest;
	struct ngp_fifo *ngp_f, *ngp_f1;
	struct ngp_hdr *ngp_h;
	struct timeval *when;
	struct mbuf *m;
	int plen, error = 0;

	/* Which one is the destination hook? */
	if (hinfo == &priv->lower)
		dest = &priv->upper;
	else
		dest = &priv->lower;

	/* Bandwidth queue processing */
	while ((ngp_f = TAILQ_FIRST(&hinfo->fifo_head))) {
		when = &hinfo->qin_utime;
		if (when->tv_sec > now->tv_sec || (when->tv_sec == now->tv_sec
		    && when->tv_usec > now->tv_usec))
			break;

		ngp_h = TAILQ_FIRST(&ngp_f->packet_head);
		m = ngp_h->m;

		/* Deficit Round Robin (DRR) processing */
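		/*
		 * With DRR each active flow carries a byte credit
		 * (rr_deficit).  A flow may transmit as long as its credit
		 * covers the head-of-queue packet; otherwise it is topped
		 * up by one quantum (cfg.drr bytes) and rotated to the
		 * tail of the active list so that other flows get a turn.
		 */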
		if (hinfo->cfg.drr) {
			if (ngp_f->rr_deficit >= m->m_pkthdr.len) {
				ngp_f->rr_deficit -= m->m_pkthdr.len;
			} else {
				ngp_f->rr_deficit += hinfo->cfg.drr;
				TAILQ_REMOVE(&hinfo->fifo_head, ngp_f, fifo_le);
				TAILQ_INSERT_TAIL(&hinfo->fifo_head,
				    ngp_f, fifo_le);
				continue;
			}
		}

		/*
		 * Either create a duplicate and pass it on, or dequeue
		 * the original packet...
		 */
		if (hinfo->cfg.duplicate &&
		    random() % 100 <= hinfo->cfg.duplicate) {
			ngp_h = uma_zalloc(ngp_zone, M_NOWAIT);
			KASSERT(ngp_h != NULL, ("ngp_h zalloc failed (3)"));
			m = m_dup(m, M_NOWAIT);
			KASSERT(m != NULL, ("m_dup failed"));
			ngp_h->m = m;
		} else {
			TAILQ_REMOVE(&ngp_f->packet_head, ngp_h, ngp_link);
			hinfo->run.qin_frames--;
			hinfo->run.qin_octets -= m->m_pkthdr.len;
			ngp_f->packets--;
		}

		/* Calculate the serialization delay */
		if (hinfo->cfg.bandwidth) {
			hinfo->qin_utime.tv_usec +=
			    ((uint64_t) m->m_pkthdr.len + priv->overhead) *
			    8000000 / hinfo->cfg.bandwidth;
			hinfo->qin_utime.tv_sec +=
			    hinfo->qin_utime.tv_usec / 1000000;
			hinfo->qin_utime.tv_usec =
			    hinfo->qin_utime.tv_usec % 1000000;
		}
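		/*
		 * The service deadline advances by
		 * (len + overhead) * 8000000 / bandwidth microseconds per
		 * frame.  For example (illustrative figures): a 1500-byte
		 * frame with 24 bytes of overhead on a 10 Mbit/s link costs
		 * (1500 + 24) * 8000000 / 10000000 = 1219 microseconds
		 * (integer division).
		 */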
		when = &ngp_h->when;
		when->tv_sec = hinfo->qin_utime.tv_sec;
		when->tv_usec = hinfo->qin_utime.tv_usec;

		/* Sort / rearrange inbound queues */
		if (ngp_f->packets) {
			if (hinfo->cfg.wfq) {
				TAILQ_REMOVE(&hinfo->fifo_head, ngp_f, fifo_le);
				FIFO_VTIME_SORT(TAILQ_FIRST(
				    &ngp_f->packet_head)->m->m_pkthdr.len)
			}
		} else {
			TAILQ_REMOVE(&hinfo->fifo_head, ngp_f, fifo_le);
			uma_zfree(ngp_zone, ngp_f);
			hinfo->run.fifo_queues--;
		}

		/* Randomly discard the frame, according to BER setting */
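		/*
		 * ber_p[] holds P_OK in the same 48-bit fixed-point format
		 * used in parse_cfg().  XORing the previous and current
		 * random() samples and shifting left by 17 bits yields a
		 * value on that scale, so the frame is discarded when the
		 * value exceeds P_OK for this frame size, i.e. with a
		 * probability of roughly 1 - P_OK.
		 */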
		if (hinfo->cfg.ber) {
			oldrand = rand;
			rand = random();
			if (((oldrand ^ rand) << 17) >=
			    hinfo->ber_p[priv->overhead + m->m_pkthdr.len]) {
				hinfo->stats.out_disc_frames++;
				hinfo->stats.out_disc_octets += m->m_pkthdr.len;
				uma_zfree(ngp_zone, ngp_h);
				m_freem(m);
				continue;
			}
		}

		/* Discard frame if outbound queue size limit exceeded */
		if (hinfo->cfg.qout_size_limit &&
		    hinfo->run.qout_frames >= hinfo->cfg.qout_size_limit) {
			hinfo->stats.out_disc_frames++;
			hinfo->stats.out_disc_octets += m->m_pkthdr.len;
			uma_zfree(ngp_zone, ngp_h);
			m_freem(m);
			continue;
		}

		/* Calculate the propagation delay */
		when->tv_usec += priv->delay;
		when->tv_sec += when->tv_usec / 1000000;
		when->tv_usec = when->tv_usec % 1000000;

		/* Put the frame into the delay queue */
		TAILQ_INSERT_TAIL(&hinfo->qout_head, ngp_h, ngp_link);
		hinfo->run.qout_frames++;
		hinfo->run.qout_octets += m->m_pkthdr.len;
	}

	/* Delay queue processing */
	while ((ngp_h = TAILQ_FIRST(&hinfo->qout_head))) {
		when = &ngp_h->when;
		m = ngp_h->m;
		if (when->tv_sec > now->tv_sec ||
		    (when->tv_sec == now->tv_sec &&
		    when->tv_usec > now->tv_usec))
			break;

		/* Update outbound queue stats */
		plen = m->m_pkthdr.len;
		hinfo->run.qout_frames--;
		hinfo->run.qout_octets -= plen;

		/* Dequeue the packet from qout */
		TAILQ_REMOVE(&hinfo->qout_head, ngp_h, ngp_link);
		uma_zfree(ngp_zone, ngp_h);

		NG_SEND_DATA(error, dest->hook, m, meta);
		if (error) {
			hinfo->stats.out_disc_frames++;
			hinfo->stats.out_disc_octets += plen;
		} else {
			hinfo->stats.fwd_frames++;
			hinfo->stats.fwd_octets += plen;
		}
	}

	if ((hinfo->run.qin_frames != 0 || hinfo->run.qout_frames != 0) &&
	    !priv->timer_scheduled) {
		ng_callout(&priv->timer, node, NULL, 1, ngp_callout, NULL, 0);
		priv->timer_scheduled = 1;
	}
}
888
889/*
890 * This routine is called on every clock tick.  We poll connected hooks
891 * for queued frames by calling pipe_dequeue().
892 */
893static void
894ngp_callout(node_p node, hook_p hook, void *arg1, int arg2)
895{
896	const priv_p priv = NG_NODE_PRIVATE(node);
897	struct timeval now;
898
899	priv->timer_scheduled = 0;
900	microuptime(&now);
901	if (priv->upper.hook != NULL)
902		pipe_dequeue(&priv->upper, &now);
903	if (priv->lower.hook != NULL)
904		pipe_dequeue(&priv->lower, &now);
905}
906
907/*
908 * Shutdown processing
909 *
910 * This is tricky. If we have both a lower and upper hook, then we
911 * probably want to extricate ourselves and leave the two peers
912 * still linked to each other. Otherwise we should just shut down as
913 * a normal node would.
914 */
915static int
916ngp_shutdown(node_p node)
917{
918	const priv_p priv = NG_NODE_PRIVATE(node);
919
920	if (priv->timer_scheduled)
921		ng_uncallout(&priv->timer, node);
922	if (priv->lower.hook && priv->upper.hook)
923		ng_bypass(priv->lower.hook, priv->upper.hook);
924	else {
925		if (priv->upper.hook != NULL)
926			ng_rmhook_self(priv->upper.hook);
927		if (priv->lower.hook != NULL)
928			ng_rmhook_self(priv->lower.hook);
929	}
930	NG_NODE_UNREF(node);
931	free(priv, M_NG_PIPE);
932	return (0);
933}
934
935
936/*
937 * Hook disconnection
938 */
939static int
940ngp_disconnect(hook_p hook)
941{
942	struct hookinfo *const hinfo = NG_HOOK_PRIVATE(hook);
943	struct ngp_fifo *ngp_f;
944	struct ngp_hdr *ngp_h;
945
946	KASSERT(hinfo != NULL, ("%s: null info", __FUNCTION__));
947	hinfo->hook = NULL;
948
949	/* Flush all fifo queues associated with the hook */
950	while ((ngp_f = TAILQ_FIRST(&hinfo->fifo_head))) {
951		while ((ngp_h = TAILQ_FIRST(&ngp_f->packet_head))) {
952			TAILQ_REMOVE(&ngp_f->packet_head, ngp_h, ngp_link);
953			m_freem(ngp_h->m);
954			uma_zfree(ngp_zone, ngp_h);
955		}
956		TAILQ_REMOVE(&hinfo->fifo_head, ngp_f, fifo_le);
957		uma_zfree(ngp_zone, ngp_f);
958	}
959
960	/* Flush the delay queue */
961	while ((ngp_h = TAILQ_FIRST(&hinfo->qout_head))) {
962		TAILQ_REMOVE(&hinfo->qout_head, ngp_h, ngp_link);
963		m_freem(ngp_h->m);
964		uma_zfree(ngp_zone, ngp_h);
965	}
966
967	/* Release the packet loss probability table (BER) */
968	if (hinfo->ber_p)
969		free(hinfo->ber_p, M_NG_PIPE);
970
971	return (0);
972}
973
974static int
975ngp_modevent(module_t mod, int type, void *unused)
976{
977	int error = 0;
978
979	switch (type) {
980	case MOD_LOAD:
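		/*
		 * A single UMA zone serves both struct ngp_hdr and
		 * struct ngp_fifo allocations, so it is sized to the
		 * larger of the two.
		 */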
		ngp_zone = uma_zcreate("ng_pipe", max(sizeof(struct ngp_hdr),
		    sizeof(struct ngp_fifo)), NULL, NULL, NULL, NULL,
		    UMA_ALIGN_PTR, 0);
		if (ngp_zone == NULL)
			panic("ng_pipe: couldn't allocate descriptor zone");
		break;
	case MOD_UNLOAD:
		uma_zdestroy(ngp_zone);
		break;
	default:
		error = EOPNOTSUPP;
		break;
	}

	return (error);
}
