1/*	$FreeBSD$	*/
2/*	$KAME: altq_rio.c,v 1.17 2003/07/10 12:07:49 kjc Exp $	*/
3
4/*
5 * Copyright (C) 1998-2003
6 *	Sony Computer Science Laboratories Inc.  All rights reserved.
7 *
8 * Redistribution and use in source and binary forms, with or without
9 * modification, are permitted provided that the following conditions
10 * are met:
11 * 1. Redistributions of source code must retain the above copyright
12 *    notice, this list of conditions and the following disclaimer.
13 * 2. Redistributions in binary form must reproduce the above copyright
14 *    notice, this list of conditions and the following disclaimer in the
15 *    documentation and/or other materials provided with the distribution.
16 *
17 * THIS SOFTWARE IS PROVIDED BY SONY CSL AND CONTRIBUTORS ``AS IS'' AND
18 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
19 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
20 * ARE DISCLAIMED.  IN NO EVENT SHALL SONY CSL OR CONTRIBUTORS BE LIABLE
21 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
22 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
23 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
24 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
25 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
26 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
27 * SUCH DAMAGE.
28 */
29/*
30 * Copyright (c) 1990-1994 Regents of the University of California.
31 * All rights reserved.
32 *
33 * Redistribution and use in source and binary forms, with or without
34 * modification, are permitted provided that the following conditions
35 * are met:
36 * 1. Redistributions of source code must retain the above copyright
37 *    notice, this list of conditions and the following disclaimer.
38 * 2. Redistributions in binary form must reproduce the above copyright
39 *    notice, this list of conditions and the following disclaimer in the
40 *    documentation and/or other materials provided with the distribution.
41 * 3. All advertising materials mentioning features or use of this software
42 *    must display the following acknowledgement:
43 *	This product includes software developed by the Computer Systems
44 *	Engineering Group at Lawrence Berkeley Laboratory.
45 * 4. Neither the name of the University nor of the Laboratory may be used
46 *    to endorse or promote products derived from this software without
47 *    specific prior written permission.
48 *
49 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
50 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
51 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
52 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
53 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
54 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
55 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
56 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
57 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
58 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
59 * SUCH DAMAGE.
60 */
61
62#if defined(__FreeBSD__) || defined(__NetBSD__)
63#include "opt_altq.h"
64#include "opt_inet.h"
65#ifdef __FreeBSD__
66#include "opt_inet6.h"
67#endif
68#endif /* __FreeBSD__ || __NetBSD__ */
69#ifdef ALTQ_RIO	/* rio is enabled by ALTQ_RIO option in opt_altq.h */
70
71#include <sys/param.h>
72#include <sys/malloc.h>
73#include <sys/mbuf.h>
74#include <sys/socket.h>
75#include <sys/systm.h>
76#include <sys/errno.h>
77#if 1 /* ALTQ3_COMPAT */
78#include <sys/proc.h>
79#include <sys/sockio.h>
80#include <sys/kernel.h>
81#endif
82
83#include <net/if.h>
84
85#include <netinet/in.h>
86#include <netinet/in_systm.h>
87#include <netinet/ip.h>
88#ifdef INET6
89#include <netinet/ip6.h>
90#endif
91
92#include <net/pfvar.h>
93#include <altq/altq.h>
94#include <altq/altq_cdnr.h>
95#include <altq/altq_red.h>
96#include <altq/altq_rio.h>
97#ifdef ALTQ3_COMPAT
98#include <altq/altq_conf.h>
99#endif
100
101/*
102 * RIO: RED with IN/OUT bit
103 *   described in
104 *	"Explicit Allocation of Best Effort Packet Delivery Service"
105 *	David D. Clark and Wenjia Fang, MIT Lab for Computer Science
106 *	http://diffserv.lcs.mit.edu/Papers/exp-alloc-ddc-wf.{ps,pdf}
107 *
108 * this implementation is extended to support more than 2 drop precedence
109 * values as described in RFC2597 (Assured Forwarding PHB Group).
110 *
111 */
112/*
113 * AF DS (differentiated service) codepoints.
114 * (classes can be mapped to CBQ or H-FSC classes.)
115 *
116 *      0   1   2   3   4   5   6   7
117 *    +---+---+---+---+---+---+---+---+
118 *    |   CLASS   |DropPre| 0 |  CU   |
119 *    +---+---+---+---+---+---+---+---+
120 *
121 *    class 1: 001
122 *    class 2: 010
123 *    class 3: 011
124 *    class 4: 100
125 *
 *    low drop prec:    01
 *    medium drop prec: 10
 *    high drop prec:   11
129 */
130
131/* normal red parameters */
132#define	W_WEIGHT	512	/* inverse of weight of EWMA (511/512) */
133				/* q_weight = 0.00195 */
134
135/* red parameters for a slow link */
136#define	W_WEIGHT_1	128	/* inverse of weight of EWMA (127/128) */
137				/* q_weight = 0.0078125 */
138
139/* red parameters for a very slow link (e.g., dialup) */
140#define	W_WEIGHT_2	64	/* inverse of weight of EWMA (63/64) */
141				/* q_weight = 0.015625 */
142
143/* fixed-point uses 12-bit decimal places */
144#define	FP_SHIFT	12	/* fixed-point shift */
145
146/* red parameters for drop probability */
147#define	INV_P_MAX	10	/* inverse of max drop probability */
148#define	TH_MIN		 5	/* min threshold */
149#define	TH_MAX		15	/* max threshold */
150
#define	RIO_LIMIT	60	/* default max queue length */
152#define	RIO_STATS		/* collect statistics */
153
/*
 * TV_DELTA(a, b, delta): store in delta the elapsed time from timeval
 * *b to timeval *a, in microseconds.  A negative or >60s second
 * difference is clamped to 60,000,000 usec; differences of up to 4
 * seconds are accumulated by repeated addition to avoid a multiply.
 * NOTE(review): appears unused in this file -- likely kept for
 * parity with the other ALTQ red sources.
 */
#define	TV_DELTA(a, b, delta) {					\
	register int	xxs;					\
								\
	delta = (a)->tv_usec - (b)->tv_usec; 			\
	if ((xxs = (a)->tv_sec - (b)->tv_sec) != 0) { 		\
		if (xxs < 0) { 					\
			delta = 60000000;			\
		} else if (xxs > 4)  {				\
			if (xxs > 60)				\
				delta = 60000000;		\
			else					\
				delta += xxs * 1000000;		\
		} else while (xxs > 0) {			\
			delta += 1000000;			\
			xxs--;					\
		}						\
	}							\
}
172
#ifdef ALTQ3_COMPAT
/* rio_list keeps all rio_queue_t's allocated. */
static rio_queue_t *rio_list = NULL;
#endif
/*
 * default rio parameter values.
 * thresholds are staggered so that a lower (better) drop precedence
 * starts dropping at a higher average queue length than a higher one.
 */
static struct redparams default_rio_params[RIO_NDROPPREC] = {
  /* th_min,		 th_max,     inv_pmax */
  { TH_MAX * 2 + TH_MIN, TH_MAX * 3, INV_P_MAX }, /* low drop precedence */
  { TH_MAX + TH_MIN,	 TH_MAX * 2, INV_P_MAX }, /* medium drop precedence */
  { TH_MIN,		 TH_MAX,     INV_P_MAX }  /* high drop precedence */
};
184
185/* internal function prototypes */
186static int dscp2index(u_int8_t);
187#ifdef ALTQ3_COMPAT
188static int rio_enqueue(struct ifaltq *, struct mbuf *, struct altq_pktattr *);
189static struct mbuf *rio_dequeue(struct ifaltq *, int);
190static int rio_request(struct ifaltq *, int, void *);
191static int rio_detach(rio_queue_t *);
192
193/*
194 * rio device interface
195 */
196altqdev_decl(rio);
197
198#endif /* ALTQ3_COMPAT */
199
200rio_t *
201rio_alloc(int weight, struct redparams *params, int flags, int pkttime)
202{
203	rio_t	*rp;
204	int	 w, i;
205	int	 npkts_per_sec;
206
207	rp = malloc(sizeof(rio_t), M_DEVBUF, M_NOWAIT | M_ZERO);
208	if (rp == NULL)
209		return (NULL);
210
211	rp->rio_flags = flags;
212	if (pkttime == 0)
213		/* default packet time: 1000 bytes / 10Mbps * 8 * 1000000 */
214		rp->rio_pkttime = 800;
215	else
216		rp->rio_pkttime = pkttime;
217
218	if (weight != 0)
219		rp->rio_weight = weight;
220	else {
221		/* use default */
222		rp->rio_weight = W_WEIGHT;
223
224		/* when the link is very slow, adjust red parameters */
225		npkts_per_sec = 1000000 / rp->rio_pkttime;
226		if (npkts_per_sec < 50) {
227			/* up to about 400Kbps */
228			rp->rio_weight = W_WEIGHT_2;
229		} else if (npkts_per_sec < 300) {
230			/* up to about 2.4Mbps */
231			rp->rio_weight = W_WEIGHT_1;
232		}
233	}
234
235	/* calculate wshift.  weight must be power of 2 */
236	w = rp->rio_weight;
237	for (i = 0; w > 1; i++)
238		w = w >> 1;
239	rp->rio_wshift = i;
240	w = 1 << rp->rio_wshift;
241	if (w != rp->rio_weight) {
242		printf("invalid weight value %d for red! use %d\n",
243		       rp->rio_weight, w);
244		rp->rio_weight = w;
245	}
246
247	/* allocate weight table */
248	rp->rio_wtab = wtab_alloc(rp->rio_weight);
249
250	for (i = 0; i < RIO_NDROPPREC; i++) {
251		struct dropprec_state *prec = &rp->rio_precstate[i];
252
253		prec->avg = 0;
254		prec->idle = 1;
255
256		if (params == NULL || params[i].inv_pmax == 0)
257			prec->inv_pmax = default_rio_params[i].inv_pmax;
258		else
259			prec->inv_pmax = params[i].inv_pmax;
260		if (params == NULL || params[i].th_min == 0)
261			prec->th_min = default_rio_params[i].th_min;
262		else
263			prec->th_min = params[i].th_min;
264		if (params == NULL || params[i].th_max == 0)
265			prec->th_max = default_rio_params[i].th_max;
266		else
267			prec->th_max = params[i].th_max;
268
269		/*
270		 * th_min_s and th_max_s are scaled versions of th_min
271		 * and th_max to be compared with avg.
272		 */
273		prec->th_min_s = prec->th_min << (rp->rio_wshift + FP_SHIFT);
274		prec->th_max_s = prec->th_max << (rp->rio_wshift + FP_SHIFT);
275
276		/*
277		 * precompute probability denominator
278		 *  probd = (2 * (TH_MAX-TH_MIN) / pmax) in fixed-point
279		 */
280		prec->probd = (2 * (prec->th_max - prec->th_min)
281			       * prec->inv_pmax) << FP_SHIFT;
282
283		microtime(&prec->last);
284	}
285
286	return (rp);
287}
288
/*
 * Release a rio state block allocated by rio_alloc(), including its
 * weight table.  Does not touch any packet queue.
 */
void
rio_destroy(rio_t *rp)
{
	wtab_destroy(rp->rio_wtab);
	free(rp, M_DEVBUF);
}
295
296void
297rio_getstats(rio_t *rp, struct redstats *sp)
298{
299	int	i;
300
301	for (i = 0; i < RIO_NDROPPREC; i++) {
302		bcopy(&rp->q_stats[i], sp, sizeof(struct redstats));
303		sp->q_avg = rp->rio_precstate[i].avg >> rp->rio_wshift;
304		sp++;
305	}
306}
307
#if (RIO_NDROPPREC == 3)
/*
 * internally, a drop precedence value is converted to an index
 * starting from 0 (0 is also used for packets carrying no AF
 * drop-precedence bits).
 */
static int
dscp2index(u_int8_t dscp)
{
	int	prec = dscp & AF_DROPPRECMASK;

	return (prec == 0 ? 0 : (prec >> 3) - 1);
}
#endif
323
#if 1
/*
 * kludge: when a packet is dequeued, we need to know its drop precedence
 * in order to keep the queue length of each drop precedence.
 * use m_pkthdr.rcvif to pass this info.
 */
/* stash the drop-precedence index in the mbuf header while queued */
#define	RIOM_SET_PRECINDEX(m, idx)	\
	do { (m)->m_pkthdr.rcvif = (void *)((long)(idx)); } while (0)
/*
 * retrieve the stashed index and clear rcvif.
 * NOTE(review): relies on a GCC statement-expression extension.
 */
#define	RIOM_GET_PRECINDEX(m)	\
	({ long idx; idx = (long)((m)->m_pkthdr.rcvif); \
	(m)->m_pkthdr.rcvif = NULL; idx; })
#endif
336
/*
 * Enqueue a packet, applying the RIO early-drop algorithm.
 *
 * The packet's AF codepoint selects a drop precedence index; the
 * average queue length estimators of that precedence AND all higher
 * (worse) precedences are updated, then the packet's own precedence
 * decides whether to drop early (red), drop forced (avg over th_max
 * or hard queue limit), or enqueue.
 *
 * Returns 0 when the packet was queued, -1 when it was dropped (the
 * mbuf is freed in that case).
 */
int
rio_addq(rio_t *rp, class_queue_t *q, struct mbuf *m,
    struct altq_pktattr *pktattr)
{
	int			 avg, droptype;
	u_int8_t		 dsfield, odsfield;
	int			 dpindex, i, n, t;
	struct timeval		 now;
	struct dropprec_state	*prec;

	dsfield = odsfield = read_dsfield(m, pktattr);
	dpindex = dscp2index(dsfield);

	/*
	 * update avg of the precedence states whose drop precedence
	 * is larger than or equal to the drop precedence of the packet
	 */
	now.tv_sec = 0;		/* microtime() is fetched lazily below */
	for (i = dpindex; i < RIO_NDROPPREC; i++) {
		prec = &rp->rio_precstate[i];
		avg = prec->avg;
		if (prec->idle) {
			/* queue was empty: decay avg for the idle period */
			prec->idle = 0;
			if (now.tv_sec == 0)
				microtime(&now);
			t = (now.tv_sec - prec->last.tv_sec);
			if (t > 60)
				avg = 0;
			else {
				t = t * 1000000 +
					(now.tv_usec - prec->last.tv_usec);
				n = t / rp->rio_pkttime;
				/* calculate (avg = (1 - Wq)^n * avg) */
				if (n > 0)
					avg = (avg >> FP_SHIFT) *
						pow_w(rp->rio_wtab, n);
			}
		}

		/* run estimator. (avg is scaled by WEIGHT in fixed-point) */
		avg += (prec->qlen << FP_SHIFT) - (avg >> rp->rio_wshift);
		prec->avg = avg;		/* save the new value */
		/*
		 * count keeps a tally of arriving traffic that has not
		 * been dropped.
		 */
		prec->count++;
	}

	/* the packet's own precedence decides its fate */
	prec = &rp->rio_precstate[dpindex];
	avg = prec->avg;

	/* see if we drop early */
	droptype = DTYPE_NODROP;
	if (avg >= prec->th_min_s && prec->qlen > 1) {
		if (avg >= prec->th_max_s) {
			/* avg >= th_max: forced drop */
			droptype = DTYPE_FORCED;
		} else if (prec->old == 0) {
			/* first exceeds th_min */
			prec->count = 1;
			prec->old = 1;
		} else if (drop_early((avg - prec->th_min_s) >> rp->rio_wshift,
				      prec->probd, prec->count)) {
			/* unforced drop by red */
			droptype = DTYPE_EARLY;
		}
	} else {
		/* avg < th_min */
		prec->old = 0;
	}

	/*
	 * if the queue length hits the hard limit, it's a forced drop.
	 */
	if (droptype == DTYPE_NODROP && qlen(q) >= qlimit(q))
		droptype = DTYPE_FORCED;

	if (droptype != DTYPE_NODROP) {
		/* always drop incoming packet (as opposed to randomdrop) */
		for (i = dpindex; i < RIO_NDROPPREC; i++)
			rp->rio_precstate[i].count = 0;
#ifdef RIO_STATS
		if (droptype == DTYPE_EARLY)
			rp->q_stats[dpindex].drop_unforced++;
		else
			rp->q_stats[dpindex].drop_forced++;
		PKTCNTR_ADD(&rp->q_stats[dpindex].drop_cnt, m_pktlen(m));
#endif
		m_freem(m);
		return (-1);
	}

	/* accepted: bump the shared queue lengths of this and worse precs */
	for (i = dpindex; i < RIO_NDROPPREC; i++)
		rp->rio_precstate[i].qlen++;

	/* save drop precedence index in mbuf hdr */
	RIOM_SET_PRECINDEX(m, dpindex);

	if (rp->rio_flags & RIOF_CLEARDSCP)
		dsfield &= ~DSCP_MASK;

	if (dsfield != odsfield)
		write_dsfield(m, pktattr, dsfield);

	_addq(q, m);

#ifdef RIO_STATS
	PKTCNTR_ADD(&rp->q_stats[dpindex].xmit_cnt, m_pktlen(m));
#endif
	return (0);
}
449
450struct mbuf *
451rio_getq(rio_t *rp, class_queue_t *q)
452{
453	struct mbuf	*m;
454	int		 dpindex, i;
455
456	if ((m = _getq(q)) == NULL)
457		return NULL;
458
459	dpindex = RIOM_GET_PRECINDEX(m);
460	for (i = dpindex; i < RIO_NDROPPREC; i++) {
461		if (--rp->rio_precstate[i].qlen == 0) {
462			if (rp->rio_precstate[i].idle == 0) {
463				rp->rio_precstate[i].idle = 1;
464				microtime(&rp->rio_precstate[i].last);
465			}
466		}
467	}
468	return (m);
469}
470
471#ifdef ALTQ3_COMPAT
/*
 * /dev/altq/rio open handler: nothing to set up here.
 */
int
rioopen(dev, flag, fmt, p)
	dev_t dev;
	int flag, fmt;
#if (__FreeBSD_version > 500000)
	struct thread *p;
#else
	struct proc *p;
#endif
{
	/* everything will be done when the queueing scheme is attached. */
	return 0;
}
485
/*
 * /dev/altq/rio close handler: tear down every attached rio state.
 * Returns the first detach error encountered (0 on full success).
 */
int
rioclose(dev, flag, fmt, p)
	dev_t dev;
	int flag, fmt;
#if (__FreeBSD_version > 500000)
	struct thread *p;
#else
	struct proc *p;
#endif
{
	rio_queue_t *rqp;
	int err, error = 0;

	/* rio_detach() unlinks rqp from rio_list, so this terminates */
	while ((rqp = rio_list) != NULL) {
		/* destroy all */
		err = rio_detach(rqp);
		if (err != 0 && error == 0)
			error = err;
	}

	return error;
}
508
509int
510rioioctl(dev, cmd, addr, flag, p)
511	dev_t dev;
512	ioctlcmd_t cmd;
513	caddr_t addr;
514	int flag;
515#if (__FreeBSD_version > 500000)
516	struct thread *p;
517#else
518	struct proc *p;
519#endif
520{
521	rio_queue_t *rqp;
522	struct rio_interface *ifacep;
523	struct ifnet *ifp;
524	int	error = 0;
525
526	/* check super-user privilege */
527	switch (cmd) {
528	case RIO_GETSTATS:
529		break;
530	default:
531#if (__FreeBSD_version > 700000)
532		if ((error = priv_check(p, PRIV_ALTQ_MANAGE)) != 0)
533			return (error);
534#elsif (__FreeBSD_version > 400000)
535		if ((error = suser(p)) != 0)
536			return (error);
537#else
538		if ((error = suser(p->p_ucred, &p->p_acflag)) != 0)
539			return (error);
540#endif
541		break;
542	}
543
544	switch (cmd) {
545
546	case RIO_ENABLE:
547		ifacep = (struct rio_interface *)addr;
548		if ((rqp = altq_lookup(ifacep->rio_ifname, ALTQT_RIO)) == NULL) {
549			error = EBADF;
550			break;
551		}
552		error = altq_enable(rqp->rq_ifq);
553		break;
554
555	case RIO_DISABLE:
556		ifacep = (struct rio_interface *)addr;
557		if ((rqp = altq_lookup(ifacep->rio_ifname, ALTQT_RIO)) == NULL) {
558			error = EBADF;
559			break;
560		}
561		error = altq_disable(rqp->rq_ifq);
562		break;
563
564	case RIO_IF_ATTACH:
565		ifp = ifunit(((struct rio_interface *)addr)->rio_ifname);
566		if (ifp == NULL) {
567			error = ENXIO;
568			break;
569		}
570
571		/* allocate and initialize rio_queue_t */
572		rqp = malloc(sizeof(rio_queue_t), M_DEVBUF, M_WAITOK);
573		if (rqp == NULL) {
574			error = ENOMEM;
575			break;
576		}
577		bzero(rqp, sizeof(rio_queue_t));
578
579		rqp->rq_q = malloc(sizeof(class_queue_t),
580		       M_DEVBUF, M_WAITOK);
581		if (rqp->rq_q == NULL) {
582			free(rqp, M_DEVBUF);
583			error = ENOMEM;
584			break;
585		}
586		bzero(rqp->rq_q, sizeof(class_queue_t));
587
588		rqp->rq_rio = rio_alloc(0, NULL, 0, 0);
589		if (rqp->rq_rio == NULL) {
590			free(rqp->rq_q, M_DEVBUF);
591			free(rqp, M_DEVBUF);
592			error = ENOMEM;
593			break;
594		}
595
596		rqp->rq_ifq = &ifp->if_snd;
597		qtail(rqp->rq_q) = NULL;
598		qlen(rqp->rq_q) = 0;
599		qlimit(rqp->rq_q) = RIO_LIMIT;
600		qtype(rqp->rq_q) = Q_RIO;
601
602		/*
603		 * set RIO to this ifnet structure.
604		 */
605		error = altq_attach(rqp->rq_ifq, ALTQT_RIO, rqp,
606				    rio_enqueue, rio_dequeue, rio_request,
607				    NULL, NULL);
608		if (error) {
609			rio_destroy(rqp->rq_rio);
610			free(rqp->rq_q, M_DEVBUF);
611			free(rqp, M_DEVBUF);
612			break;
613		}
614
615		/* add this state to the rio list */
616		rqp->rq_next = rio_list;
617		rio_list = rqp;
618		break;
619
620	case RIO_IF_DETACH:
621		ifacep = (struct rio_interface *)addr;
622		if ((rqp = altq_lookup(ifacep->rio_ifname, ALTQT_RIO)) == NULL) {
623			error = EBADF;
624			break;
625		}
626		error = rio_detach(rqp);
627		break;
628
629	case RIO_GETSTATS:
630		do {
631			struct rio_stats *q_stats;
632			rio_t *rp;
633			int i;
634
635			q_stats = (struct rio_stats *)addr;
636			if ((rqp = altq_lookup(q_stats->iface.rio_ifname,
637					       ALTQT_RIO)) == NULL) {
638				error = EBADF;
639				break;
640			}
641
642			rp = rqp->rq_rio;
643
644			q_stats->q_limit = qlimit(rqp->rq_q);
645			q_stats->weight	= rp->rio_weight;
646			q_stats->flags = rp->rio_flags;
647
648			for (i = 0; i < RIO_NDROPPREC; i++) {
649				q_stats->q_len[i] = rp->rio_precstate[i].qlen;
650				bcopy(&rp->q_stats[i], &q_stats->q_stats[i],
651				      sizeof(struct redstats));
652				q_stats->q_stats[i].q_avg =
653				    rp->rio_precstate[i].avg >> rp->rio_wshift;
654
655				q_stats->q_params[i].inv_pmax
656					= rp->rio_precstate[i].inv_pmax;
657				q_stats->q_params[i].th_min
658					= rp->rio_precstate[i].th_min;
659				q_stats->q_params[i].th_max
660					= rp->rio_precstate[i].th_max;
661			}
662		} while (/*CONSTCOND*/ 0);
663		break;
664
665	case RIO_CONFIG:
666		do {
667			struct rio_conf *fc;
668			rio_t	*new;
669			int s, limit, i;
670
671			fc = (struct rio_conf *)addr;
672			if ((rqp = altq_lookup(fc->iface.rio_ifname,
673					       ALTQT_RIO)) == NULL) {
674				error = EBADF;
675				break;
676			}
677
678			new = rio_alloc(fc->rio_weight, &fc->q_params[0],
679					fc->rio_flags, fc->rio_pkttime);
680			if (new == NULL) {
681				error = ENOMEM;
682				break;
683			}
684
685#ifdef __NetBSD__
686			s = splnet();
687#else
688			s = splimp();
689#endif
690			_flushq(rqp->rq_q);
691			limit = fc->rio_limit;
692			if (limit < fc->q_params[RIO_NDROPPREC-1].th_max)
693				limit = fc->q_params[RIO_NDROPPREC-1].th_max;
694			qlimit(rqp->rq_q) = limit;
695
696			rio_destroy(rqp->rq_rio);
697			rqp->rq_rio = new;
698
699			splx(s);
700
701			/* write back new values */
702			fc->rio_limit = limit;
703			for (i = 0; i < RIO_NDROPPREC; i++) {
704				fc->q_params[i].inv_pmax =
705					rqp->rq_rio->rio_precstate[i].inv_pmax;
706				fc->q_params[i].th_min =
707					rqp->rq_rio->rio_precstate[i].th_min;
708				fc->q_params[i].th_max =
709					rqp->rq_rio->rio_precstate[i].th_max;
710			}
711		} while (/*CONSTCOND*/ 0);
712		break;
713
714	case RIO_SETDEFAULTS:
715		do {
716			struct redparams *rp;
717			int i;
718
719			rp = (struct redparams *)addr;
720			for (i = 0; i < RIO_NDROPPREC; i++)
721				default_rio_params[i] = rp[i];
722		} while (/*CONSTCOND*/ 0);
723		break;
724
725	default:
726		error = EINVAL;
727		break;
728	}
729
730	return error;
731}
732
/*
 * Detach a rio state from its interface, unlink it from rio_list,
 * and free all of its resources.  Returns the altq_detach() error,
 * in which case nothing is freed.
 */
static int
rio_detach(rqp)
	rio_queue_t *rqp;
{
	rio_queue_t *tmp;
	int error = 0;

	if (ALTQ_IS_ENABLED(rqp->rq_ifq))
		altq_disable(rqp->rq_ifq);

	if ((error = altq_detach(rqp->rq_ifq)))
		return (error);

	/* unlink rqp from the singly-linked rio_list */
	if (rio_list == rqp)
		rio_list = rqp->rq_next;
	else {
		for (tmp = rio_list; tmp != NULL; tmp = tmp->rq_next)
			if (tmp->rq_next == rqp) {
				tmp->rq_next = rqp->rq_next;
				break;
			}
		if (tmp == NULL)
			printf("rio_detach: no state found in rio_list!\n");
	}

	rio_destroy(rqp->rq_rio);
	free(rqp->rq_q, M_DEVBUF);
	free(rqp, M_DEVBUF);
	return (error);
}
763
764/*
765 * rio support routines
766 */
/*
 * altq request handler; only ALTRQ_PURGE (discard all queued packets)
 * is implemented.
 * NOTE(review): the purge does not reset the per-precedence qlen
 * counters in rio_precstate[] -- confirm against upstream whether
 * that is intentional.
 */
static int
rio_request(ifq, req, arg)
	struct ifaltq *ifq;
	int req;
	void *arg;
{
	rio_queue_t *rqp = (rio_queue_t *)ifq->altq_disc;

	IFQ_LOCK_ASSERT(ifq);

	switch (req) {
	case ALTRQ_PURGE:
		_flushq(rqp->rq_q);
		if (ALTQ_IS_ENABLED(ifq))
			ifq->ifq_len = 0;
		break;
	}
	return (0);
}
786
787/*
788 * enqueue routine:
789 *
790 *	returns: 0 when successfully queued.
791 *		 ENOBUFS when drop occurs.
792 */
static int
rio_enqueue(ifq, m, pktattr)
	struct ifaltq *ifq;
	struct mbuf *m;
	struct altq_pktattr *pktattr;
{
	rio_queue_t *rqp = (rio_queue_t *)ifq->altq_disc;
	int error = 0;

	IFQ_LOCK_ASSERT(ifq);

	/* rio_addq() frees the mbuf itself when it decides to drop */
	if (rio_addq(rqp->rq_rio, rqp->rq_q, m, pktattr) == 0)
		ifq->ifq_len++;
	else
		error = ENOBUFS;
	return error;
}
810
811/*
812 * dequeue routine:
813 *	must be called in splimp.
814 *
815 *	returns: mbuf dequeued.
816 *		 NULL when no packet is available in the queue.
817 */
818
static struct mbuf *
rio_dequeue(ifq, op)
	struct ifaltq *ifq;
	int op;
{
	rio_queue_t *rqp = (rio_queue_t *)ifq->altq_disc;
	struct mbuf *m = NULL;

	IFQ_LOCK_ASSERT(ifq);

	/* ALTDQ_POLL peeks at the head without removing it */
	if (op == ALTDQ_POLL)
		return qhead(rqp->rq_q);

	m = rio_getq(rqp->rq_rio, rqp->rq_q);
	if (m != NULL)
		ifq->ifq_len--;
	return m;
}
837
#ifdef KLD_MODULE

/* character-device switch entry for the loadable-module build */
static struct altqsw rio_sw =
	{"rio", rioopen, rioclose, rioioctl};

ALTQ_MODULE(altq_rio, ALTQT_RIO, &rio_sw);
MODULE_VERSION(altq_rio, 1);
/* rio builds on the red machinery (wtab_*, drop_early, pow_w) */
MODULE_DEPEND(altq_rio, altq_red, 1, 1, 1);

#endif /* KLD_MODULE */
848#endif /* ALTQ3_COMPAT */
849
850#endif /* ALTQ_RIO */
851