/*-
 * SPDX-License-Identifier: BSD-2-Clause
 *
 * Copyright (c) 2010 Luigi Rizzo, Riccardo Panicucci, Universita` di Pisa
 * All rights reserved
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

/*
 * Dummynet portions related to packet handling.
 */
#include <sys/cdefs.h>
#include "opt_inet6.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/malloc.h>
#include <sys/mbuf.h>
#include <sys/kernel.h>
#include <sys/lock.h>
#include <sys/module.h>
#include <sys/mutex.h>
#include <sys/priv.h>
#include <sys/proc.h>
#include <sys/rwlock.h>
#include <sys/socket.h>
#include <sys/time.h>
#include <sys/sysctl.h>

#include <net/if.h>	/* IFNAMSIZ, struct ifaddr, ifq head, lock.h mutex.h */
#include <net/if_var.h>	/* NET_EPOCH_... */
#include <net/if_private.h>
#include <net/netisr.h>
#include <net/vnet.h>

#include <netinet/in.h>
#include <netinet/ip.h>		/* ip_len, ip_off */
#include <netinet/ip_var.h>	/* ip_output(), IP_FORWARDING */
#include <netinet/ip_fw.h>
#include <netinet/ip_dummynet.h>
#include <netinet/if_ether.h>	/* various ether_* routines */
#include <netinet/ip6.h>	/* for ip6_input, ip6_output prototypes */
#include <netinet6/ip6_var.h>

#include <netpfil/ipfw/ip_fw_private.h>
#include <netpfil/ipfw/dn_heap.h>
#include <netpfil/ipfw/ip_dn_private.h>
#ifdef NEW_AQM
#include <netpfil/ipfw/dn_aqm.h>
#endif
#include <netpfil/ipfw/dn_sched.h>
/*
 * We keep a private variable for the simulation time, but we could
 * probably use an existing one ("softticks" in sys/kern/kern_timeout.c)
 * instead of V_dn_cfg.curr_time.
 */
VNET_DEFINE(struct dn_parms, dn_cfg);
#define V_dn_cfg VNET(dn_cfg)

/*
 * We use a heap to store entities for which we have pending timer events.
 * The heap is checked at every tick and all entities with expired events
 * are extracted.
 */

MALLOC_DEFINE(M_DUMMYNET, "dummynet", "dummynet heap");

extern	void (*bridge_dn_p)(struct mbuf *, struct ifnet *);

#ifdef SYSCTL_NODE

/*
 * Because of the way the SYSBEGIN/SYSEND macros work on other
 * platforms, there should not be functions between them.
 * So keep the handlers outside the block.
 */
static int
sysctl_hash_size(SYSCTL_HANDLER_ARGS)
{
	int error, value;

	value = V_dn_cfg.hash_size;
	error = sysctl_handle_int(oidp, &value, 0, req);
	if (error != 0 || req->newptr == NULL)
		return (error);
	if (value < 16 || value > 65536)
		return (EINVAL);
	V_dn_cfg.hash_size = value;
	return (0);
}

static int
sysctl_limits(SYSCTL_HANDLER_ARGS)
{
	int error;
	long value;

	if (arg2 != 0)
		value = V_dn_cfg.slot_limit;
	else
		value = V_dn_cfg.byte_limit;
	error = sysctl_handle_long(oidp, &value, 0, req);

	if (error != 0 || req->newptr == NULL)
		return (error);
	if (arg2 != 0) {
		if (value < 1)
			return (EINVAL);
		V_dn_cfg.slot_limit = value;
	} else {
		if (value < 1500)
			return (EINVAL);
		V_dn_cfg.byte_limit = value;
	}
	return (0);
}
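
/*
 * Illustrative usage (a sketch, relying on the sysctl names declared
 * below under net.inet.ip.dummynet): the handlers above back knobs such
 * as
 *
 *	sysctl net.inet.ip.dummynet.hash_size=1024
 *	sysctl net.inet.ip.dummynet.pipe_slot_limit=200
 *	sysctl net.inet.ip.dummynet.pipe_byte_limit=2097152
 *
 * and reject with EINVAL a hash_size outside [16, 65536], a slot limit
 * below 1, or a byte limit below 1500.
 */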

SYSBEGIN(f4)

SYSCTL_DECL(_net_inet);
SYSCTL_DECL(_net_inet_ip);
#ifdef NEW_AQM
SYSCTL_NODE(_net_inet_ip, OID_AUTO, dummynet, CTLFLAG_RW | CTLFLAG_MPSAFE, 0,
    "Dummynet");
#else
static SYSCTL_NODE(_net_inet_ip, OID_AUTO, dummynet,
    CTLFLAG_RW | CTLFLAG_MPSAFE, 0,
    "Dummynet");
#endif

/* wrapper to pass V_dn_cfg fields to SYSCTL_* */
#define DC(x)	(&(VNET_NAME(dn_cfg).x))

/* parameters */

SYSCTL_PROC(_net_inet_ip_dummynet, OID_AUTO, hash_size,
    CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_NEEDGIANT,
    0, 0, sysctl_hash_size, "I",
    "Default hash table size");

SYSCTL_PROC(_net_inet_ip_dummynet, OID_AUTO, pipe_slot_limit,
    CTLTYPE_LONG | CTLFLAG_RW | CTLFLAG_NEEDGIANT,
    0, 1, sysctl_limits, "L",
    "Upper limit in slots for pipe queue.");
SYSCTL_PROC(_net_inet_ip_dummynet, OID_AUTO, pipe_byte_limit,
    CTLTYPE_LONG | CTLFLAG_RW | CTLFLAG_NEEDGIANT,
    0, 0, sysctl_limits, "L",
    "Upper limit in bytes for pipe queue.");
SYSCTL_INT(_net_inet_ip_dummynet, OID_AUTO, io_fast,
    CTLFLAG_RW | CTLFLAG_VNET, DC(io_fast), 0, "Enable fast dummynet io.");
SYSCTL_INT(_net_inet_ip_dummynet, OID_AUTO, debug,
    CTLFLAG_RW | CTLFLAG_VNET, DC(debug), 0, "Dummynet debug level");

/* RED parameters */
SYSCTL_INT(_net_inet_ip_dummynet, OID_AUTO, red_lookup_depth,
    CTLFLAG_RD | CTLFLAG_VNET, DC(red_lookup_depth), 0,
    "Depth of RED lookup table");
SYSCTL_INT(_net_inet_ip_dummynet, OID_AUTO, red_avg_pkt_size,
    CTLFLAG_RD | CTLFLAG_VNET, DC(red_avg_pkt_size), 0,
    "RED medium packet size");
SYSCTL_INT(_net_inet_ip_dummynet, OID_AUTO, red_max_pkt_size,
    CTLFLAG_RD | CTLFLAG_VNET, DC(red_max_pkt_size), 0,
    "RED max packet size");

/* time adjustment */
SYSCTL_LONG(_net_inet_ip_dummynet, OID_AUTO, tick_delta,
    CTLFLAG_RD | CTLFLAG_VNET, DC(tick_delta), 0,
    "Last vs standard tick difference (usec).");
SYSCTL_LONG(_net_inet_ip_dummynet, OID_AUTO, tick_delta_sum,
    CTLFLAG_RD | CTLFLAG_VNET, DC(tick_delta_sum), 0,
    "Accumulated tick difference (usec).");
SYSCTL_LONG(_net_inet_ip_dummynet, OID_AUTO, tick_adjustment,
    CTLFLAG_RD | CTLFLAG_VNET, DC(tick_adjustment), 0,
    "Tick adjustments done.");
SYSCTL_LONG(_net_inet_ip_dummynet, OID_AUTO, tick_diff,
    CTLFLAG_RD | CTLFLAG_VNET, DC(tick_diff), 0,
    "Adjusted vs non-adjusted curr_time difference (ticks).");
SYSCTL_LONG(_net_inet_ip_dummynet, OID_AUTO, tick_lost,
    CTLFLAG_RD | CTLFLAG_VNET, DC(tick_lost), 0,
    "Number of ticks coalesced by dummynet taskqueue.");

/* Drain parameters */
SYSCTL_UINT(_net_inet_ip_dummynet, OID_AUTO, expire,
    CTLFLAG_RW | CTLFLAG_VNET, DC(expire), 0, "Expire empty queues/pipes");
SYSCTL_UINT(_net_inet_ip_dummynet, OID_AUTO, expire_cycle,
    CTLFLAG_RD | CTLFLAG_VNET, DC(expire_cycle), 0,
    "Expire cycle for queues/pipes");

/* statistics */
SYSCTL_INT(_net_inet_ip_dummynet, OID_AUTO, schk_count,
    CTLFLAG_RD | CTLFLAG_VNET, DC(schk_count), 0, "Number of schedulers");
SYSCTL_INT(_net_inet_ip_dummynet, OID_AUTO, si_count,
    CTLFLAG_RD | CTLFLAG_VNET, DC(si_count), 0,
    "Number of scheduler instances");
SYSCTL_INT(_net_inet_ip_dummynet, OID_AUTO, fsk_count,
    CTLFLAG_RD | CTLFLAG_VNET, DC(fsk_count), 0, "Number of flowsets");
SYSCTL_INT(_net_inet_ip_dummynet, OID_AUTO, queue_count,
    CTLFLAG_RD | CTLFLAG_VNET, DC(queue_count), 0, "Number of queues");
SYSCTL_ULONG(_net_inet_ip_dummynet, OID_AUTO, io_pkt,
    CTLFLAG_RD | CTLFLAG_VNET, DC(io_pkt), 0,
    "Number of packets passed to dummynet.");
SYSCTL_ULONG(_net_inet_ip_dummynet, OID_AUTO, io_pkt_fast,
    CTLFLAG_RD | CTLFLAG_VNET, DC(io_pkt_fast), 0,
    "Number of packets that bypassed the dummynet scheduler.");
SYSCTL_ULONG(_net_inet_ip_dummynet, OID_AUTO, io_pkt_drop,
    CTLFLAG_RD | CTLFLAG_VNET, DC(io_pkt_drop), 0,
    "Number of packets dropped by dummynet.");
#undef DC
SYSEND

#endif

static void	dummynet_send(struct mbuf *);

/*
 * Return the mbuf tag holding the dummynet state (it should
 * be the first one on the list).
 */
struct dn_pkt_tag *
dn_tag_get(struct mbuf *m)
{
	struct m_tag *mtag = m_tag_first(m);
#ifdef NEW_AQM
	/* XXX: skip the AQM timestamp m_tag; for debugging only. */
	if (mtag != NULL && mtag->m_tag_id == DN_AQM_MTAG_TS) {
		m_tag_delete(m, mtag);
		mtag = m_tag_first(m);
		D("skip TS tag");
	}
#endif
	KASSERT(mtag != NULL &&
	    mtag->m_tag_cookie == MTAG_ABI_COMPAT &&
	    mtag->m_tag_id == PACKET_TAG_DUMMYNET,
	    ("packet on dummynet queue w/o dummynet tag!"));
	/* The tag payload follows the m_tag header in the same allocation. */
	return ((struct dn_pkt_tag *)(mtag + 1));
}

#ifndef NEW_AQM
static inline void
mq_append(struct mq *q, struct mbuf *m)
{
#ifdef USERSPACE
	// buffers from netmap need to be copied
	// XXX note that the routine is not expected to fail
	ND("append %p to %p", m, q);
	if (m->m_flags & M_STACK) {
		struct mbuf *m_new;
		void *p;
		int l, ofs;

		ofs = m->m_data - m->__m_extbuf;
		// XXX allocate
		MGETHDR(m_new, M_NOWAIT, MT_DATA);
		ND("*** WARNING, volatile buf %p ext %p %d dofs %d m_new %p",
			m, m->__m_extbuf, m->__m_extlen, ofs, m_new);
		p = m_new->__m_extbuf;	/* new pointer */
		l = m_new->__m_extlen;	/* new len */
		if (l <= m->__m_extlen) {
			panic("extlen too large");
		}

		*m_new = *m;	// copy
		m_new->m_flags &= ~M_STACK;
		m_new->__m_extbuf = p; // point to new buffer
		_pkt_copy(m->__m_extbuf, p, m->__m_extlen);
		m_new->m_data = p + ofs;
		m = m_new;
	}
#endif /* USERSPACE */
	if (q->head == NULL)
		q->head = m;
	else
		q->tail->m_nextpkt = m;
	q->count++;
	q->tail = m;
	m->m_nextpkt = NULL;
}
#endif

/*
 * Dispose of a list of packets. Use a function so that, if we need to
 * do more work, this is a central point to do it.
 */
void
dn_free_pkts(struct mbuf *mnext)
{
	struct mbuf *m;

	while ((m = mnext) != NULL) {
		mnext = m->m_nextpkt;
		FREE_PKT(m);
	}
}

static int
red_drops(struct dn_queue *q, int len)
{
	/*
	 * RED algorithm
	 *
	 * RED calculates the average queue size (avg) using a low-pass filter
	 * with an exponential weighted (w_q) moving average:
	 * 	avg  <-  (1-w_q) * avg + w_q * q_size
	 * where q_size is the queue length (measured in bytes or packets).
	 *
	 * If q_size == 0, we compute the idle time for the link, and set
	 *	avg <- avg * (1 - w_q)^(idle/s)
	 * where s is the time needed for transmitting a medium-sized packet.
	 *
	 * Now, if avg < min_th the packet is enqueued.
	 * If avg >= max_th the packet is dropped, otherwise the packet is
	 * dropped with a probability p_b that is a function of avg.
	 */
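
	/*
	 * Worked example (illustrative only; it assumes the 16-bit
	 * fixed-point SCALE()/SCALE_MUL() macros used by this file):
	 * with w_q = 0.002 (stored as ~131 in fixed point), avg = 0 and
	 * q_size = 50 packets, the update below computes
	 *	diff = SCALE(50) = 3276800
	 *	v = SCALE_MUL(3276800, 131) = (3276800 * 131) >> 16 ~= 6550
	 * so avg becomes ~SCALE(0.1), i.e. the average moves 0.2% of the
	 * way towards the instantaneous queue size, as expected for this
	 * w_q.
	 */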

	struct dn_fsk *fs = q->fs;
	int64_t p_b = 0;

	/* Queue in bytes or packets? */
	uint32_t q_size = (fs->fs.flags & DN_QSIZE_BYTES) ?
	    q->ni.len_bytes : q->ni.length;

	/* Average queue size estimation. */
	if (q_size != 0) {
		/* Queue is not empty, avg <- avg + (q_size - avg) * w_q */
		int diff = SCALE(q_size) - q->avg;
		int64_t v = SCALE_MUL((int64_t)diff, (int64_t)fs->w_q);

		q->avg += (int)v;
	} else {
		/*
		 * Queue is empty, find for how long the queue has been
		 * empty and use a lookup table for computing
		 * (1 - w_q)^(idle_time/s) where s is the time to send a
		 * (small) packet.
		 * XXX check wraps...
		 */
		if (q->avg) {
			u_int t = div64((V_dn_cfg.curr_time - q->q_time),
			    fs->lookup_step);

			q->avg = (t < fs->lookup_depth) ?
			    SCALE_MUL(q->avg, fs->w_q_lookup[t]) : 0;
		}
	}

	/* Should I drop? */
	if (q->avg < fs->min_th) {
		q->count = -1;
		return (0);	/* accept packet */
	}
	if (q->avg >= fs->max_th) {	/* average queue >= max threshold */
		if (fs->fs.flags & DN_IS_ECN)
			return (1);
		if (fs->fs.flags & DN_IS_GENTLE_RED) {
			/*
			 * According to Gentle-RED, if avg is greater than
			 * max_th the packet is dropped with a probability
			 *	 p_b = c_3 * avg - c_4
			 * where c_3 = (1 - max_p) / max_th
			 *       c_4 = 1 - 2 * max_p
			 */
			p_b = SCALE_MUL((int64_t)fs->c_3, (int64_t)q->avg) -
			    fs->c_4;
		} else {
			q->count = -1;
			return (1);
		}
	} else if (q->avg > fs->min_th) {
		if (fs->fs.flags & DN_IS_ECN)
			return (1);
		/*
		 * We compute p_b using the linear dropping function
		 *	 p_b = c_1 * avg - c_2
		 * where c_1 = max_p / (max_th - min_th)
		 * 	 c_2 = max_p * min_th / (max_th - min_th)
		 */
		p_b = SCALE_MUL((int64_t)fs->c_1, (int64_t)q->avg) - fs->c_2;
	}

	if (fs->fs.flags & DN_QSIZE_BYTES)
		p_b = div64(p_b * len, fs->max_pkt_size);
	if (++q->count == 0)
		q->random = random() & 0xffff;
	else {
		/*
		 * q->count counts packets arrived since the last drop, so a
		 * greater value of q->count means a greater packet drop
		 * probability.
		 */
		if (SCALE_MUL(p_b, SCALE((int64_t)q->count)) > q->random) {
			q->count = 0;
			/* After a drop we calculate a new random value. */
			q->random = random() & 0xffff;
			return (1);	/* drop */
		}
	}
	/* End of RED algorithm. */

	return (0);	/* accept */
}

/*
 * ECN/ECT Processing (partially adopted from altq)
 */
#ifndef NEW_AQM
static
#endif
int
ecn_mark(struct mbuf *m)
{
	struct ip *ip;

	ip = (struct ip *)mtodo(m, dn_tag_get(m)->iphdr_off);

	switch (ip->ip_v) {
	case IPVERSION:
	{
		uint16_t old;

		if ((ip->ip_tos & IPTOS_ECN_MASK) == IPTOS_ECN_NOTECT)
			return (0);	/* not-ECT */
		if ((ip->ip_tos & IPTOS_ECN_MASK) == IPTOS_ECN_CE)
			return (1);	/* already marked */

		/*
		 * ECN-capable but not yet marked: set CE and update the
		 * header checksum incrementally, feeding cksum_adjust()
		 * the first 16-bit word of the header before and after
		 * the change.
		 */
		old = *(uint16_t *)ip;
		ip->ip_tos |= IPTOS_ECN_CE;
		ip->ip_sum = cksum_adjust(ip->ip_sum, old, *(uint16_t *)ip);
		return (1);
	}
#ifdef INET6
	case (IPV6_VERSION >> 4):
	{
		struct ip6_hdr *ip6 = (struct ip6_hdr *)ip;
		u_int32_t flowlabel;

		flowlabel = ntohl(ip6->ip6_flow);
		if ((flowlabel >> 28) != 6)
			return (0);	/* version mismatch! */
		if ((flowlabel & (IPTOS_ECN_MASK << 20)) ==
		    (IPTOS_ECN_NOTECT << 20))
			return (0);	/* not-ECT */
		if ((flowlabel & (IPTOS_ECN_MASK << 20)) ==
		    (IPTOS_ECN_CE << 20))
			return (1);	/* already marked */
		/*
		 * ECN-capable but not yet marked: set CE in the traffic
		 * class bits (no header checksum to fix in IPv6).
		 */
		flowlabel |= (IPTOS_ECN_CE << 20);
		ip6->ip6_flow = htonl(flowlabel);
		return (1);
	}
#endif
	}
	return (0);
}

/*
 * Enqueue a packet in q, subject to space and queue management policy
 * (whose parameters are in q->fs).
 * Update stats for the queue and the scheduler.
 * Return 0 on success, 1 on drop. The packet is consumed anyway.
 */
int
dn_enqueue(struct dn_queue *q, struct mbuf *m, int drop)
{
	struct dn_fs *f;
	struct dn_flow *ni;	/* stats for scheduler instance */
	uint64_t len;

	if (q->fs == NULL || q->_si == NULL) {
		printf("%s fs %p si %p, dropping\n",
		    __FUNCTION__, q->fs, q->_si);
		FREE_PKT(m);
		return (1);
	}
	f = &(q->fs->fs);
	ni = &q->_si->ni;
	len = m->m_pkthdr.len;
	/* Update statistics, then check reasons to drop pkt. */
	q->ni.tot_bytes += len;
	q->ni.tot_pkts++;
	ni->tot_bytes += len;
	ni->tot_pkts++;
	if (drop)
		goto drop;
	/*
	 * Random packet loss: plr[0] alone gives uniform loss, while a
	 * nonzero plr[1] selects a two-state (Gilbert-like) model where
	 * plr[0]/plr[2] are the drop probabilities in the good/bad state
	 * and plr[1]/plr[3] the good->bad/bad->good transition
	 * probabilities, all compared against random().
	 */
	if (f->plr[0] || f->plr[1]) {
		if (__predict_true(f->plr[1] == 0)) {
			if (random() < f->plr[0])
				goto drop;
		} else {
			switch (f->pl_state) {
			case PLR_STATE_B:
				if (random() < f->plr[3])
					f->pl_state = PLR_STATE_G;
				if (random() < f->plr[2])
					goto drop;
				break;
			case PLR_STATE_G: /* FALLTHROUGH */
			default:
				if (random() < f->plr[1])
					f->pl_state = PLR_STATE_B;
				if (random() < f->plr[0])
					goto drop;
				break;
			}
		}
	}
	if (m->m_pkthdr.rcvif != NULL)
		m_rcvif_serialize(m);
#ifdef NEW_AQM
	/* Call AQM enqueue function */
	if (q->fs->aqmfp)
		return (q->fs->aqmfp->enqueue(q, m));
#endif
	if (f->flags & DN_IS_RED && red_drops(q, m->m_pkthdr.len)) {
		if (!(f->flags & DN_IS_ECN) || !ecn_mark(m))
			goto drop;
	}
	if (f->flags & DN_QSIZE_BYTES) {
		if (q->ni.len_bytes > f->qsize)
			goto drop;
	} else if (q->ni.length >= f->qsize) {
		goto drop;
	}
	mq_append(&q->mq, m);
	q->ni.length++;
	q->ni.len_bytes += len;
	ni->length++;
	ni->len_bytes += len;
	return (0);

drop:
	V_dn_cfg.io_pkt_drop++;
	q->ni.drops++;
	ni->drops++;
	FREE_PKT(m);
	return (1);
}

/*
 * Fetch packets from the delay line which are due now. If there are
 * leftover packets, reinsert the delay line in the heap.
 * Runs under scheduler lock.
 */
static void
transmit_event(struct mq *q, struct delay_line *dline, uint64_t now)
{
	struct mbuf *m;
	struct dn_pkt_tag *pkt = NULL;

	dline->oid.subtype = 0; /* not in heap */
	while ((m = dline->mq.head) != NULL) {
		pkt = dn_tag_get(m);
		if (!DN_KEY_LEQ(pkt->output_time, now))
			break;
		dline->mq.head = m->m_nextpkt;
		dline->mq.count--;
		if (m->m_pkthdr.rcvif != NULL &&
		    __predict_false(m_rcvif_restore(m) == NULL))
			m_freem(m);
		else
			mq_append(q, m);
	}
	if (m != NULL) {
		dline->oid.subtype = 1; /* in heap */
		heap_insert(&V_dn_cfg.evheap, pkt->output_time, dline);
	}
}

/*
 * Convert the additional MAC overheads/delays into an equivalent
 * number of bits for the given data rate. The samples are
 * in milliseconds so we need to divide by 1000.
 */
static uint64_t
extra_bits(struct mbuf *m, struct dn_schk *s)
{
	int index;
	uint64_t bits;
	struct dn_profile *pf = s->profile;

	if (!pf || pf->samples_no == 0)
		return (0);
	index = random() % pf->samples_no;
	bits = div64((uint64_t)pf->samples[index] * s->link.bandwidth, 1000);
	if (index >= pf->loss_level) {
		struct dn_pkt_tag *dt = dn_tag_get(m);

		if (dt)
			dt->dn_dir = DIR_DROP;
	}
	return (bits);
}
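
/*
 * Example of the conversion above (illustrative numbers): with a link
 * bandwidth of 1 Mbit/s and a profile sample of 2 ms, the overhead is
 * 1000000 * 2 / 1000 = 2000 bits, i.e. the link is charged as if an
 * extra 250 bytes were sent with the packet. Samples at or past
 * loss_level instead mark the packet to be freed in dummynet_send().
 */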

/*
 * Send traffic from a scheduler instance due by 'now'.
 * Return a pointer to the head of the queue.
 */
static struct mbuf *
serve_sched(struct mq *q, struct dn_sch_inst *si, uint64_t now)
{
	struct mq def_q;
	struct dn_schk *s = si->sched;
	struct mbuf *m = NULL;
	int delay_line_idle = (si->dline.mq.head == NULL);
	int done;
	uint32_t bw;

	if (q == NULL) {
		q = &def_q;
		q->head = NULL;
	}

	bw = s->link.bandwidth;
	si->kflags &= ~DN_ACTIVE;

	if (bw > 0)
		si->credit += (now - si->sched_time) * bw;
	else
		si->credit = 0;
	si->sched_time = now;
	done = 0;
	while (si->credit >= 0 && (m = s->fp->dequeue(si)) != NULL) {
		uint64_t len_scaled;

		done++;
		len_scaled = (bw == 0) ? 0 : hz *
			(m->m_pkthdr.len * 8 + extra_bits(m, s));
		si->credit -= len_scaled;
		/* Move packet in the delay line */
		dn_tag_get(m)->output_time = V_dn_cfg.curr_time + s->link.delay;
		if (m->m_pkthdr.rcvif != NULL)
			m_rcvif_serialize(m);
		mq_append(&si->dline.mq, m);
	}

	/*
	 * If credit >= 0 the instance is idle, mark time.
	 * Otherwise put back in the heap, and adjust the output
	 * time of the last inserted packet, m, which was too early.
	 */
	if (si->credit >= 0) {
		si->idle_time = now;
	} else {
		uint64_t t;

		KASSERT(bw > 0, ("bw=0 and credit<0 ?"));
		t = div64(bw - 1 - si->credit, bw);
		if (m)
			dn_tag_get(m)->output_time += t;
		si->kflags |= DN_ACTIVE;
		heap_insert(&V_dn_cfg.evheap, now + t, si);
	}
	if (delay_line_idle && done)
		transmit_event(q, &si->dline, now);
	return (q->head);
}
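
/*
 * Note on the credit arithmetic above (illustrative example): credit is
 * kept in bit-ticks, since it grows by (elapsed ticks) * bw [bits/s]
 * and each packet costs hz [ticks/s] * length [bits]. With hz = 1000
 * and bw = 1 Mbit/s, a 1500-byte packet costs 1000 * 12000 = 12,000,000
 * units, which the link earns back in 12 ticks, i.e. the expected 12 ms
 * of transmission time. t = div64(bw - 1 - credit, bw) is then simply
 * the ceiling of -credit/bw, the number of ticks until credit >= 0.
 */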

/*
 * The timer handler for dummynet. Time is computed in ticks, but
 * the code is tolerant to the actual rate at which this is called.
 * Once complete, the function reschedules itself for the next tick.
 */
void
dummynet_task(void *context, int pending)
{
	struct timeval t;
	struct mq q = { NULL, NULL }; /* queue to accumulate results */
	struct epoch_tracker et;

	VNET_ITERATOR_DECL(vnet_iter);
	VNET_LIST_RLOCK();
	NET_EPOCH_ENTER(et);

	VNET_FOREACH(vnet_iter) {
		memset(&q, 0, sizeof(struct mq));
		CURVNET_SET(vnet_iter);

		if (!V_dn_cfg.init_done) {
			CURVNET_RESTORE();
			continue;
		}

		DN_BH_WLOCK();

		/* Update number of lost (coalesced) ticks. */
		V_dn_cfg.tick_lost += pending - 1;

		getmicrouptime(&t);
		/* Last tick duration (usec). */
		V_dn_cfg.tick_last = (t.tv_sec - V_dn_cfg.prev_t.tv_sec) * 1000000 +
		    (t.tv_usec - V_dn_cfg.prev_t.tv_usec);
		/* Last tick vs standard tick difference (usec). */
		V_dn_cfg.tick_delta = (V_dn_cfg.tick_last * hz - 1000000) / hz;
		/* Accumulated tick difference (usec). */
		V_dn_cfg.tick_delta_sum += V_dn_cfg.tick_delta;

		V_dn_cfg.prev_t = t;

		/*
		 * Adjust curr_time if the accumulated tick difference is
		 * greater than the 'standard' tick. Since curr_time should
		 * be monotonically increasing, we do positive adjustments
		 * as required, and throttle curr_time in case of negative
		 * adjustment.
		 */
		V_dn_cfg.curr_time++;
		if (V_dn_cfg.tick_delta_sum - tick >= 0) {
			int diff = V_dn_cfg.tick_delta_sum / tick;

			V_dn_cfg.curr_time += diff;
			V_dn_cfg.tick_diff += diff;
			V_dn_cfg.tick_delta_sum %= tick;
			V_dn_cfg.tick_adjustment++;
		} else if (V_dn_cfg.tick_delta_sum + tick <= 0) {
			V_dn_cfg.curr_time--;
			V_dn_cfg.tick_diff--;
			V_dn_cfg.tick_delta_sum += tick;
			V_dn_cfg.tick_adjustment++;
		}
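
		/*
		 * Numeric example of the adjustment above (illustrative;
		 * 'tick' is the kernel's usec-per-tick constant): with
		 * hz = 1000 (tick = 1000 us), a run of ticks that each
		 * last 1100 us yields tick_delta = 100 us per tick; after
		 * ten of them tick_delta_sum reaches 1000 us and
		 * curr_time advances by one extra tick, keeping simulated
		 * time aligned with wall-clock time.
		 */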

		/* serve pending events, accumulate in q */
		for (;;) {
			struct dn_id *p;    /* generic parameter to handler */

			if (V_dn_cfg.evheap.elements == 0 ||
			    DN_KEY_LT(V_dn_cfg.curr_time, HEAP_TOP(&V_dn_cfg.evheap)->key))
				break;
			p = HEAP_TOP(&V_dn_cfg.evheap)->object;
			heap_extract(&V_dn_cfg.evheap, NULL);
			if (p->type == DN_SCH_I) {
				serve_sched(&q, (struct dn_sch_inst *)p, V_dn_cfg.curr_time);
			} else { /* extracted a delay line */
				transmit_event(&q, (struct delay_line *)p, V_dn_cfg.curr_time);
			}
		}
		if (V_dn_cfg.expire && ++V_dn_cfg.expire_cycle >= V_dn_cfg.expire) {
			V_dn_cfg.expire_cycle = 0;
			dn_drain_scheduler();
			dn_drain_queue();
		}
		DN_BH_WUNLOCK();
		if (q.head != NULL)
			dummynet_send(q.head);

		CURVNET_RESTORE();
	}
	NET_EPOCH_EXIT(et);
	VNET_LIST_RUNLOCK();

	/* Schedule our next run. */
	dn_reschedule();
}

/*
 * Forward a chain of packets to the proper destination.
 * This runs outside the dummynet lock.
 */
static void
dummynet_send(struct mbuf *m)
{
	struct mbuf *n;

	NET_EPOCH_ASSERT();

	for (; m != NULL; m = n) {
		struct ifnet *ifp = NULL;	/* gcc 3.4.6 complains */
		struct m_tag *tag;
		int dst;

		n = m->m_nextpkt;
		m->m_nextpkt = NULL;
		tag = m_tag_first(m);
		if (tag == NULL) { /* should not happen */
			dst = DIR_DROP;
		} else {
			struct dn_pkt_tag *pkt = dn_tag_get(m);

			/*
			 * Extract the dummynet info, rename the tag
			 * to carry reinject info.
			 */
			ifp = ifnet_byindexgen(pkt->if_index, pkt->if_idxgen);
			if (((pkt->dn_dir == (DIR_OUT | PROTO_LAYER2)) ||
			    (pkt->dn_dir == (DIR_OUT | PROTO_LAYER2 | PROTO_IPV6))) &&
			    ifp == NULL) {
				dst = DIR_DROP;
			} else {
				dst = pkt->dn_dir;
				tag->m_tag_cookie = MTAG_IPFW_RULE;
				tag->m_tag_id = 0;
			}
		}

		switch (dst) {
		case DIR_OUT:
			ip_output(m, NULL, NULL, IP_FORWARDING, NULL, NULL);
			break;

		case DIR_IN:
			netisr_dispatch(NETISR_IP, m);
			break;

#ifdef INET6
		case DIR_IN | PROTO_IPV6:
			netisr_dispatch(NETISR_IPV6, m);
			break;

		case DIR_OUT | PROTO_IPV6:
			ip6_output(m, NULL, NULL, IPV6_FORWARDING, NULL, NULL, NULL);
			break;
#endif

		case DIR_FWD | PROTO_IFB: /* DN_TO_IFB_FWD: */
			if (bridge_dn_p != NULL)
				((*bridge_dn_p)(m, ifp));
			else
				printf("dummynet: if_bridge not loaded\n");

			break;

		case DIR_IN | PROTO_LAYER2 | PROTO_IPV6:
		case DIR_IN | PROTO_LAYER2: /* DN_TO_ETH_DEMUX: */
			/*
			 * The Ethernet code assumes the Ethernet header is
			 * contiguous in the first mbuf header.
			 * Ensure this is true.
			 */
			if (m->m_len < ETHER_HDR_LEN &&
			    (m = m_pullup(m, ETHER_HDR_LEN)) == NULL) {
				printf("dummynet/ether: pullup failed, "
				    "dropping packet\n");
				break;
			}
			ether_demux(m->m_pkthdr.rcvif, m);
			break;

		case DIR_OUT | PROTO_LAYER2 | PROTO_IPV6:
		case DIR_OUT | PROTO_LAYER2: /* DN_TO_ETH_OUT: */
			MPASS(ifp != NULL);
			ether_output_frame(ifp, m);
			break;

		case DIR_DROP:
			/* drop the packet after some time */
			FREE_PKT(m);
			break;

		default:
			printf("dummynet: bad switch %d!\n", dst);
			FREE_PKT(m);
			break;
		}
	}
}

static inline int
tag_mbuf(struct mbuf *m, int dir, struct ip_fw_args *fwa)
{
	struct dn_pkt_tag *dt;
	struct m_tag *mtag;

	mtag = m_tag_get(PACKET_TAG_DUMMYNET,
	    sizeof(*dt), M_NOWAIT | M_ZERO);
	if (mtag == NULL)
		return (1);		/* Cannot allocate packet header. */
	m_tag_prepend(m, mtag);		/* Attach to mbuf chain. */
	dt = (struct dn_pkt_tag *)(mtag + 1);
	dt->rule = fwa->rule;
	/* only keep this info */
	dt->rule.info &= (IPFW_ONEPASS | IPFW_IS_DUMMYNET);
	dt->dn_dir = dir;
	if (fwa->flags & IPFW_ARGS_OUT && fwa->ifp != NULL) {
		NET_EPOCH_ASSERT();
		dt->if_index = fwa->ifp->if_index;
		dt->if_idxgen = fwa->ifp->if_idxgen;
	}
	/* dt->output_time is updated as we move through */
	dt->output_time = V_dn_cfg.curr_time;
	dt->iphdr_off = (dir & PROTO_LAYER2) ? ETHER_HDR_LEN : 0;
	return (0);
}

/*
 * dummynet hook for packets.
 * We use the argument to locate the flowset fs and the sched_set sch
 * associated with it. Then we apply flow_mask and sched_mask to
 * determine the queue and scheduler instances.
 */
int
dummynet_io(struct mbuf **m0, struct ip_fw_args *fwa)
{
	struct mbuf *m = *m0;
	struct dn_fsk *fs = NULL;
	struct dn_sch_inst *si;
	struct dn_queue *q = NULL;	/* default */
	int fs_id, dir;

	fs_id = (fwa->rule.info & IPFW_INFO_MASK) +
		((fwa->rule.info & IPFW_IS_PIPE) ? 2*DN_MAX_ID : 0);
	/* XXXGL: convert args to dir */
	if (fwa->flags & IPFW_ARGS_IN)
		dir = DIR_IN;
	else
		dir = DIR_OUT;
	if (fwa->flags & IPFW_ARGS_ETHER)
		dir |= PROTO_LAYER2;
	else if (fwa->flags & IPFW_ARGS_IP6)
		dir |= PROTO_IPV6;
	DN_BH_WLOCK();
	V_dn_cfg.io_pkt++;
	/* we could actually tag outside the lock, but who cares... */
	if (tag_mbuf(m, dir, fwa))
		goto dropit;
	/* XXX locate_flowset could be optimised with a direct ref. */
	fs = dn_ht_find(V_dn_cfg.fshash, fs_id, 0, NULL);
	if (fs == NULL)
		goto dropit;	/* This queue/pipe does not exist! */
	if (fs->sched == NULL)	/* should not happen */
		goto dropit;
	/* find scheduler instance, possibly applying sched_mask */
	si = ipdn_si_find(fs->sched, &(fwa->f_id));
	if (si == NULL)
		goto dropit;
	/*
	 * If the scheduler supports multiple queues, find the right one
	 * (otherwise it will be ignored by enqueue).
	 */
	if (fs->sched->fp->flags & DN_MULTIQUEUE) {
		q = ipdn_q_find(fs, si, &(fwa->f_id));
		if (q == NULL)
			goto dropit;
	}
	if (fs->sched->fp->enqueue(si, q, m)) {
		/* packet was dropped by enqueue() */
		m = *m0 = NULL;

		/* dn_enqueue already increases io_pkt_drop */
		V_dn_cfg.io_pkt_drop--;

		goto dropit;
	}

	if (si->kflags & DN_ACTIVE) {
		m = *m0 = NULL; /* consumed */
		goto done; /* already active, nothing to do */
	}

	/* compute the initial allowance */
	if (si->idle_time < V_dn_cfg.curr_time) {
		/* Do this only on the first packet on an idle pipe */
		struct dn_link *p = &fs->sched->link;

		si->sched_time = V_dn_cfg.curr_time;
		si->credit = V_dn_cfg.io_fast ? p->bandwidth : 0;
		if (p->burst) {
			uint64_t burst = (V_dn_cfg.curr_time - si->idle_time) *
			    p->bandwidth;

			if (burst > p->burst)
				burst = p->burst;
			si->credit += burst;
		}
	}
	/* pass through scheduler and delay line */
	m = serve_sched(NULL, si, V_dn_cfg.curr_time);

	/*
	 * Optimization: pass the packet back to ipfw for immediate send.
	 * XXX Don't call dummynet_send() if the scheduler returned the
	 * packet just enqueued; this avoids a lock order reversal.
	 */
	if (/*V_dn_cfg.io_fast &&*/ m == *m0 && (dir & PROTO_LAYER2) == 0) {
		/* fast io: rename the tag to carry reinject info. */
		struct m_tag *tag = m_tag_first(m);

		tag->m_tag_cookie = MTAG_IPFW_RULE;
		tag->m_tag_id = 0;
		V_dn_cfg.io_pkt_fast++;
		if (m->m_nextpkt != NULL) {
			printf("dummynet: fast io: pkt chain detected!\n");
			m->m_nextpkt = NULL;
		}
		m = NULL;
	} else {
		*m0 = NULL;
	}
done:
	DN_BH_WUNLOCK();
	if (m)
		dummynet_send(m);
	return (0);

dropit:
	V_dn_cfg.io_pkt_drop++;
	DN_BH_WUNLOCK();
	if (m)
		FREE_PKT(m);
	*m0 = NULL;
	return (fs && (fs->fs.flags & DN_NOERROR)) ? 0 : ENOBUFS;
}
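
/*
 * Illustrative ipfw(8) usage of the path above (a sketch, not part of
 * this file's interface): a configuration such as
 *
 *	ipfw pipe 1 config bw 1Mbit/s delay 20ms
 *	ipfw add 100 pipe 1 ip from any to any
 *
 * makes ipfw invoke dummynet_io() for matching packets; the flowset is
 * looked up from rule.info, the packet is enqueued through the
 * scheduler, sits in the delay line for the configured link delay, and
 * is finally reinjected by dummynet_send().
 */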