/*	$FreeBSD$	*/
/*	$KAME: altq_rmclass.c,v 1.19 2005/04/13 03:44:25 suz Exp $	*/

/*
 * Copyright (c) 1991-1997 Regents of the University of California.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *      This product includes software developed by the Network Research
 *      Group at Lawrence Berkeley Laboratory.
 * 4. Neither the name of the University nor of the Laboratory may be used
 *    to endorse or promote products derived from this software without
 *    specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * LBL code modified by speer@eng.sun.com, May 1997.
 * For questions and/or comments, please send mail to cbq@ee.lbl.gov
 */

#ident "@(#)rm_class.c  1.48     97/12/05 SMI"

#if defined(__FreeBSD__) || defined(__NetBSD__)
#include "opt_altq.h"
#include "opt_inet.h"
#ifdef __FreeBSD__
#include "opt_inet6.h"
#endif
#endif /* __FreeBSD__ || __NetBSD__ */
#ifdef ALTQ_CBQ	/* cbq is enabled by ALTQ_CBQ option in opt_altq.h */

#include <sys/param.h>
#include <sys/malloc.h>
#include <sys/mbuf.h>
#include <sys/socket.h>
#include <sys/systm.h>
#include <sys/errno.h>
#include <sys/time.h>
#ifdef ALTQ3_COMPAT
#include <sys/kernel.h>
#endif

#include <net/if.h>
#ifdef ALTQ3_COMPAT
#include <netinet/in.h>
#include <netinet/in_systm.h>
#include <netinet/ip.h>
#endif

#include <altq/altq.h>
#include <altq/altq_rmclass.h>
#include <altq/altq_rmclass_debug.h>
#include <altq/altq_red.h>
#include <altq/altq_rio.h>

/*
 * Local Macros
 */

#define	reset_cutoff(ifd)	{ ifd->cutoff_ = RM_MAXDEPTH; }

/*
 * Local routines.
 */

static int	rmc_satisfied(struct rm_class *, struct timeval *);
static void	rmc_wrr_set_weights(struct rm_ifdat *);
static void	rmc_depth_compute(struct rm_class *);
static void	rmc_depth_recompute(rm_class_t *);

static mbuf_t	*_rmc_wrr_dequeue_next(struct rm_ifdat *, int);
static mbuf_t	*_rmc_prr_dequeue_next(struct rm_ifdat *, int);

static int	_rmc_addq(rm_class_t *, mbuf_t *);
static void	_rmc_dropq(rm_class_t *);
static mbuf_t	*_rmc_getq(rm_class_t *);
static mbuf_t	*_rmc_pollq(rm_class_t *);

static int	rmc_under_limit(struct rm_class *, struct timeval *);
static void	rmc_tl_satisfied(struct rm_ifdat *, struct timeval *);
static void	rmc_drop_action(struct rm_class *);
static void	rmc_restart(struct rm_class *);
static void	rmc_root_overlimit(struct rm_class *, struct rm_class *);

#define	BORROW_OFFTIME
/*
 * BORROW_OFFTIME (experimental):
 * borrow the offtime of the class being borrowed from.
 * the reason is that when its own offtime is set, the class is unable
 * to borrow much, especially when cutoff is taking effect.
 * but when the borrowed class is overloaded (avgidle is close to minidle),
 * use the borrowing class's offtime to avoid overload.
 */
#define	ADJUST_CUTOFF
/*
 * ADJUST_CUTOFF (experimental):
 * if no underlimit class is found due to cutoff, increase cutoff and
 * retry the scheduling loop.
 * also, don't invoke delay_actions while cutoff is taking effect,
 * since a sleeping class won't have a chance to be scheduled in the
 * next loop.
 *
 * now the heuristics for setting the top-level variable (cutoff_) become:
 *	1. if a packet arrives for a not-overlimit class, set cutoff
 *	   to the depth of the class.
 *	2. if cutoff is i, and a packet arrives for an overlimit class
 *	   with an underlimit ancestor at a lower level than i (say j),
 *	   then set cutoff to j.
 *	3. at scheduling a packet, if there is no underlimit class
 *	   due to the current cutoff level, increase cutoff by 1 and
 *	   then try to schedule again.
 */
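
/*
 * Illustrative walk-through (added; not in the original code): with a
 * root at depth 2, an interior class at depth 1 and leaves at depth 0,
 * a packet arriving for an underlimit leaf pulls cutoff_ down to 0
 * (rule 1), so classes may send only on their own limits.  If a leaf
 * is overlimit but its depth-1 parent is underlimit, cutoff_ is set
 * to 1 (rule 2), letting the leaf borrow from the parent.  If the
 * scheduler then finds nothing underlimit at or below cutoff_, it
 * raises cutoff_ by one and rescans (rule 3).
 */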

/*
 * rm_class_t *
 * rmc_newclass(...) - Create a new resource management class at priority
 * 'pri' on the interface given by 'ifd'.
 *
 * nsecPerByte  is the data rate of the interface in nanoseconds/byte.
 *              E.g., 800 for a 10Mb/s ethernet.  If the class gets less
 *              than 100% of the bandwidth, this number should be the
 *              'effective' rate for the class.  Let f be the
 *              bandwidth fraction allocated to this class, and let
 *              nsPerByte be the data rate of the output link in
 *              nanoseconds/byte.  Then nsecPerByte is set to
 *              nsPerByte / f.  E.g., 1600 (= 800 / .5)
 *              for a class that gets 50% of an ethernet's bandwidth.
 *
 * action       the routine to call when the class is over limit.
 *
 * maxq         max allowable queue size for class (in packets).
 *
 * parent       parent class pointer.
 *
 * borrow       class to borrow from (should be either 'parent' or null).
 *
 * maxidle      max value allowed for class 'idle' time estimate (this
 *              parameter determines how large an initial burst of packets
 *              can be before overlimit action is invoked).
 *
 * offtime      how long 'delay' action will delay when class goes over
 *              limit (this parameter determines the steady-state burst
 *              size when a class is running over its limit).
 *
 * Maxidle and offtime have to be computed from the following:  If the
 * average packet size is s, the bandwidth fraction allocated to this
 * class is f, we want to allow b packet bursts, and the gain of the
 * averaging filter is g (= 1 - 2^(-RM_FILTER_GAIN)), then:
 *
 *   ptime = s * nsPerByte * (1 - f) / f
 *   maxidle = ptime * (1 - g^b) / g^b
 *   minidle = -ptime * (1 / (f - 1))
 *   offtime = ptime * (1 + 1/(1 - g) * (1 - g^(b - 1)) / g^(b - 1))
 *
 * Operationally, it's convenient to specify maxidle & offtime in units
 * independent of the link bandwidth so the maxidle & offtime passed to
 * this routine are the above values multiplied by 8*f/(1000*nsPerByte).
 * (The constant factor is a scale factor needed to make the parameters
 * integers.  This scaling also means that the 'unscaled' values of
 * maxidle*nsecPerByte/8 and offtime*nsecPerByte/8 will be in microseconds,
 * not nanoseconds.)  Also note that the 'idle' filter computation keeps
 * an estimate scaled upward by 2^RM_FILTER_GAIN so the passed value of
 * maxidle also must be scaled upward by this value.  Thus, the passed
 * values for maxidle and offtime can be computed as follows:
 *
 * maxidle = maxidle * 2^RM_FILTER_GAIN * 8 / (1000 * nsecPerByte)
 * offtime = offtime * 8 / (1000 * nsecPerByte)
 *
 * When USE_HRTIME is employed, then maxidle and offtime become:
 * 	maxidle = maxidle * (8.0 / nsecPerByte);
 * 	offtime = offtime * (8.0 / nsecPerByte);
 */
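
/*
 * Worked example (added for illustration; not part of the original
 * LBL/Sun code, and assuming RM_FILTER_GAIN = 5 as in altq_rmclass.h,
 * i.e. g = 31/32 = 0.96875): for a class given f = 50% of a 10Mb/s
 * ethernet (nsPerByte = 800, so nsecPerByte = 800 / 0.5 = 1600), with
 * s = 1000 byte packets and b = 2 packet bursts:
 *
 *	ptime   = 1000 * 800 * (1 - 0.5) / 0.5	=  800000 ns
 *	maxidle = 800000 * (1 - g^2) / g^2	=~  52445 ns
 *
 * These raw values are then scaled as described above before being
 * passed in.
 */
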
struct rm_class *
rmc_newclass(int pri, struct rm_ifdat *ifd, u_int nsecPerByte,
    void (*action)(rm_class_t *, rm_class_t *), int maxq,
    struct rm_class *parent, struct rm_class *borrow, u_int maxidle,
    int minidle, u_int offtime, int pktsize, int flags)
{
	struct rm_class	*cl;
	struct rm_class	*peer;
	int		 s;

	if (pri >= RM_MAXPRIO)
		return (NULL);
#ifndef ALTQ_RED
	if (flags & RMCF_RED) {
#ifdef ALTQ_DEBUG
		printf("rmc_newclass: RED not configured for CBQ!\n");
#endif
		return (NULL);
	}
#endif
#ifndef ALTQ_RIO
	if (flags & RMCF_RIO) {
#ifdef ALTQ_DEBUG
		printf("rmc_newclass: RIO not configured for CBQ!\n");
#endif
		return (NULL);
	}
#endif

	cl = malloc(sizeof(struct rm_class),
	       M_DEVBUF, M_WAITOK);
	if (cl == NULL)
		return (NULL);
	bzero(cl, sizeof(struct rm_class));
	CALLOUT_INIT(&cl->callout_);
	cl->q_ = malloc(sizeof(class_queue_t),
	       M_DEVBUF, M_WAITOK);
	if (cl->q_ == NULL) {
		free(cl, M_DEVBUF);
		return (NULL);
	}
	bzero(cl->q_, sizeof(class_queue_t));

	/*
	 * Class initialization.
	 */
	cl->children_ = NULL;
	cl->parent_ = parent;
	cl->borrow_ = borrow;
	cl->leaf_ = 1;
	cl->ifdat_ = ifd;
	cl->pri_ = pri;
	cl->allotment_ = RM_NS_PER_SEC / nsecPerByte; /* Bytes per sec */
	cl->depth_ = 0;
	cl->qthresh_ = 0;
	cl->ns_per_byte_ = nsecPerByte;

	qlimit(cl->q_) = maxq;
	qtype(cl->q_) = Q_DROPHEAD;
	qlen(cl->q_) = 0;
	cl->flags_ = flags;

#if 1 /* minidle is also scaled in ALTQ */
	cl->minidle_ = (minidle * (int)nsecPerByte) / 8;
	if (cl->minidle_ > 0)
		cl->minidle_ = 0;
#else
	cl->minidle_ = minidle;
#endif
	cl->maxidle_ = (maxidle * nsecPerByte) / 8;
	if (cl->maxidle_ == 0)
		cl->maxidle_ = 1;
#if 1 /* offtime is also scaled in ALTQ */
	cl->avgidle_ = cl->maxidle_;
	cl->offtime_ = ((offtime * nsecPerByte) / 8) >> RM_FILTER_GAIN;
	if (cl->offtime_ == 0)
		cl->offtime_ = 1;
#else
	cl->avgidle_ = 0;
	cl->offtime_ = (offtime * nsecPerByte) / 8;
#endif
	cl->overlimit = action;

#ifdef ALTQ_RED
	if (flags & (RMCF_RED|RMCF_RIO)) {
		int red_flags, red_pkttime;

		red_flags = 0;
		if (flags & RMCF_ECN)
			red_flags |= REDF_ECN;
		if (flags & RMCF_FLOWVALVE)
			red_flags |= REDF_FLOWVALVE;
#ifdef ALTQ_RIO
		if (flags & RMCF_CLEARDSCP)
			red_flags |= RIOF_CLEARDSCP;
#endif
		red_pkttime = nsecPerByte * pktsize / 1000;

		if (flags & RMCF_RED) {
			cl->red_ = red_alloc(0, 0,
			    qlimit(cl->q_) * 10/100,
			    qlimit(cl->q_) * 30/100,
			    red_flags, red_pkttime);
			if (cl->red_ != NULL)
				qtype(cl->q_) = Q_RED;
		}
#ifdef ALTQ_RIO
		else {
			cl->red_ = (red_t *)rio_alloc(0, NULL,
						      red_flags, red_pkttime);
			if (cl->red_ != NULL)
				qtype(cl->q_) = Q_RIO;
		}
#endif
	}
#endif /* ALTQ_RED */

	/*
	 * put the class into the class tree
	 */
#ifdef __NetBSD__
	s = splnet();
#else
	s = splimp();
#endif
	IFQ_LOCK(ifd->ifq_);
	if ((peer = ifd->active_[pri]) != NULL) {
		/* find the last class at this pri */
		cl->peer_ = peer;
		while (peer->peer_ != ifd->active_[pri])
			peer = peer->peer_;
		peer->peer_ = cl;
	} else {
		ifd->active_[pri] = cl;
		cl->peer_ = cl;
	}

	if (cl->parent_) {
		cl->next_ = parent->children_;
		parent->children_ = cl;
		parent->leaf_ = 0;
	}

	/*
	 * Compute the depth of this class and its ancestors in the class
	 * hierarchy.
	 */
	rmc_depth_compute(cl);

	/*
	 * If CBQ's WRR is enabled, then initialize the class WRR state.
	 */
	if (ifd->wrr_) {
		ifd->num_[pri]++;
		ifd->alloc_[pri] += cl->allotment_;
		rmc_wrr_set_weights(ifd);
	}
	IFQ_UNLOCK(ifd->ifq_);
	splx(s);
	return (cl);
}

int
rmc_modclass(struct rm_class *cl, u_int nsecPerByte, int maxq, u_int maxidle,
    int minidle, u_int offtime, int pktsize)
{
	struct rm_ifdat	*ifd;
	u_int		 old_allotment;
	int		 s;

	ifd = cl->ifdat_;
	old_allotment = cl->allotment_;

#ifdef __NetBSD__
	s = splnet();
#else
	s = splimp();
#endif
	IFQ_LOCK(ifd->ifq_);
	cl->allotment_ = RM_NS_PER_SEC / nsecPerByte; /* Bytes per sec */
	cl->qthresh_ = 0;
	cl->ns_per_byte_ = nsecPerByte;

	qlimit(cl->q_) = maxq;

#if 1 /* minidle is also scaled in ALTQ */
	cl->minidle_ = (minidle * nsecPerByte) / 8;
	if (cl->minidle_ > 0)
		cl->minidle_ = 0;
#else
	cl->minidle_ = minidle;
#endif
	cl->maxidle_ = (maxidle * nsecPerByte) / 8;
	if (cl->maxidle_ == 0)
		cl->maxidle_ = 1;
#if 1 /* offtime is also scaled in ALTQ */
	cl->avgidle_ = cl->maxidle_;
	cl->offtime_ = ((offtime * nsecPerByte) / 8) >> RM_FILTER_GAIN;
	if (cl->offtime_ == 0)
		cl->offtime_ = 1;
#else
	cl->avgidle_ = 0;
	cl->offtime_ = (offtime * nsecPerByte) / 8;
#endif

	/*
	 * If CBQ's WRR is enabled, then initialize the class WRR state.
	 */
	if (ifd->wrr_) {
		ifd->alloc_[cl->pri_] += cl->allotment_ - old_allotment;
		rmc_wrr_set_weights(ifd);
	}
	IFQ_UNLOCK(ifd->ifq_);
	splx(s);
	return (0);
}

/*
 * static void
 * rmc_wrr_set_weights(struct rm_ifdat *ifdat) - This function computes
 *	the appropriate round robin weights for the CBQ weighted round
 *	robin algorithm.
 *
 *	Returns: NONE
 */

static void
rmc_wrr_set_weights(struct rm_ifdat *ifd)
{
	int		i;
	struct rm_class	*cl, *clh;

	for (i = 0; i < RM_MAXPRIO; i++) {
		/*
		 * This is inverted from that of the simulator to
		 * maintain precision.
		 */
		if (ifd->num_[i] == 0)
			ifd->M_[i] = 0;
		else
			ifd->M_[i] = ifd->alloc_[i] /
				(ifd->num_[i] * ifd->maxpkt_);
		/*
		 * Compute the weighted allotment for each class.
		 * This takes the expensive div instruction out
		 * of the main loop for the wrr scheduling path.
		 * These only get recomputed when a class comes or
		 * goes.
		 */
		if (ifd->active_[i] != NULL) {
			clh = cl = ifd->active_[i];
			do {
				/* safe-guard for slow link or alloc_ == 0 */
				if (ifd->M_[i] == 0)
					cl->w_allotment_ = 0;
				else
					cl->w_allotment_ = cl->allotment_ /
						ifd->M_[i];
				cl = cl->peer_;
			} while ((cl != NULL) && (cl != clh));
		}
	}
}
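
/*
 * Worked example (added for illustration, with assumed numbers): on a
 * 10Mb/s link with alloc_[i] = 1250000 bytes/sec split between two
 * equal classes and maxpkt_ = 1500, M_[i] = 1250000 / (2 * 1500) = 416,
 * so each class gets w_allotment_ = 625000 / 416 =~ 1502 bytes --
 * roughly one maximum-size packet per round.
 */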

int
rmc_get_weight(struct rm_ifdat *ifd, int pri)
{
	if ((pri >= 0) && (pri < RM_MAXPRIO))
		return (ifd->M_[pri]);
	else
		return (0);
}

/*
 * static void
 * rmc_depth_compute(struct rm_class *cl) - This function computes the
 *	appropriate depth of class 'cl' and its ancestors.
 *
 *	Returns:	NONE
 */

static void
rmc_depth_compute(struct rm_class *cl)
{
	rm_class_t	*t = cl, *p;

	/*
	 * Recompute the depth for the branch of the tree.
	 */
	while (t != NULL) {
		p = t->parent_;
		if (p && (t->depth_ >= p->depth_)) {
			p->depth_ = t->depth_ + 1;
			t = p;
		} else
			t = NULL;
	}
}

/*
 * static void
 * rmc_depth_recompute(struct rm_class *cl) - This function re-computes
 *	the depth of the tree after a class has been deleted.
 *
 *	Returns:	NONE
 */

static void
rmc_depth_recompute(rm_class_t *cl)
{
#if 1 /* ALTQ */
	rm_class_t	*p, *t;

	p = cl;
	while (p != NULL) {
		if ((t = p->children_) == NULL) {
			p->depth_ = 0;
		} else {
			int cdepth = 0;

			while (t != NULL) {
				if (t->depth_ > cdepth)
					cdepth = t->depth_;
				t = t->next_;
			}

			if (p->depth_ == cdepth + 1)
				/* no change to this parent */
				return;

			p->depth_ = cdepth + 1;
		}

		p = p->parent_;
	}
#else
	rm_class_t	*t;

	if (cl->depth_ >= 1) {
		if (cl->children_ == NULL) {
			cl->depth_ = 0;
		} else if ((t = cl->children_) != NULL) {
			while (t != NULL) {
				if (t->children_ != NULL)
					rmc_depth_recompute(t);
				t = t->next_;
			}
		} else
			rmc_depth_compute(cl);
	}
#endif
}

/*
 * void
 * rmc_delete_class(struct rm_ifdat *ifdat, struct rm_class *cl) - This
 *	function deletes a class from the link-sharing structure and frees
 *	all resources associated with the class.
 *
 *	Returns: NONE
 */

void
rmc_delete_class(struct rm_ifdat *ifd, struct rm_class *cl)
{
	struct rm_class	*p, *head, *previous;
	int		 s;

	ASSERT(cl->children_ == NULL);

	if (cl->sleeping_)
		CALLOUT_STOP(&cl->callout_);

#ifdef __NetBSD__
	s = splnet();
#else
	s = splimp();
#endif
	IFQ_LOCK(ifd->ifq_);
	/*
	 * Free packets in the packet queue.
	 * XXX - this may not be a desired behavior.  Packets should be
	 *		re-queued.
	 */
	rmc_dropall(cl);

	/*
	 * If the class has a parent, then remove the class from the
	 * parent's children chain.
	 */
	if (cl->parent_ != NULL) {
		head = cl->parent_->children_;
		p = previous = head;
		if (head->next_ == NULL) {
			ASSERT(head == cl);
			cl->parent_->children_ = NULL;
			cl->parent_->leaf_ = 1;
		} else while (p != NULL) {
			if (p == cl) {
				if (cl == head)
					cl->parent_->children_ = cl->next_;
				else
					previous->next_ = cl->next_;
				cl->next_ = NULL;
				p = NULL;
			} else {
				previous = p;
				p = p->next_;
			}
		}
	}

	/*
	 * Delete class from class priority peer list.
	 */
	if ((p = ifd->active_[cl->pri_]) != NULL) {
		/*
		 * If there is more than one member of this priority
		 * level, then look for class(cl) in the priority level.
		 */
		if (p != p->peer_) {
			while (p->peer_ != cl)
				p = p->peer_;
			p->peer_ = cl->peer_;

			if (ifd->active_[cl->pri_] == cl)
				ifd->active_[cl->pri_] = cl->peer_;
		} else {
			ASSERT(p == cl);
			ifd->active_[cl->pri_] = NULL;
		}
	}

	/*
	 * Recompute the WRR weights.
	 */
	if (ifd->wrr_) {
		ifd->alloc_[cl->pri_] -= cl->allotment_;
		ifd->num_[cl->pri_]--;
		rmc_wrr_set_weights(ifd);
	}

	/*
	 * Re-compute the depth of the tree.
	 */
#if 1 /* ALTQ */
	rmc_depth_recompute(cl->parent_);
#else
	rmc_depth_recompute(ifd->root_);
#endif

	IFQ_UNLOCK(ifd->ifq_);
	splx(s);

	/*
	 * Free the class structure.
	 */
	if (cl->red_ != NULL) {
#ifdef ALTQ_RIO
		if (q_is_rio(cl->q_))
			rio_destroy((rio_t *)cl->red_);
#endif
#ifdef ALTQ_RED
		if (q_is_red(cl->q_))
			red_destroy(cl->red_);
#endif
	}
	free(cl->q_, M_DEVBUF);
	free(cl, M_DEVBUF);
}


/*
 * void
 * rmc_init(...) - Initialize the resource management data structures
 *	associated with the output portion of interface 'ifp'.  'ifd' is
 *	where the structures will be built (for backwards compatibility, the
 *	structures aren't kept in the ifnet struct).  'nsecPerByte'
 *	gives the link speed (inverse of bandwidth) in nanoseconds/byte.
 *	'restart' is the driver-specific routine that the generic 'delay
 *	until under limit' action will call to restart output.  `maxq'
 *	is the queue size of the 'link' & 'default' classes.  'maxqueued'
 *	is the maximum number of packets that the resource management
 *	code will allow to be queued 'downstream' (this is typically 1).
 *
 *	Returns:	NONE
 */

void
rmc_init(struct ifaltq *ifq, struct rm_ifdat *ifd, u_int nsecPerByte,
    void (*restart)(struct ifaltq *), int maxq, int maxqueued, u_int maxidle,
    int minidle, u_int offtime, int flags)
{
	int		i, mtu;

	/*
	 * Initialize the CBQ tracing/debug facility.
	 */
	CBQTRACEINIT();

	bzero((char *)ifd, sizeof (*ifd));
	mtu = ifq->altq_ifp->if_mtu;
	ifd->ifq_ = ifq;
	ifd->restart = restart;
	ifd->maxqueued_ = maxqueued;
	ifd->ns_per_byte_ = nsecPerByte;
	ifd->maxpkt_ = mtu;
	ifd->wrr_ = (flags & RMCF_WRR) ? 1 : 0;
	ifd->efficient_ = (flags & RMCF_EFFICIENT) ? 1 : 0;
#if 1
	ifd->maxiftime_ = mtu * nsecPerByte / 1000 * 16;
	if (mtu * nsecPerByte > 10 * 1000000)
		ifd->maxiftime_ /= 4;
#endif

	reset_cutoff(ifd);
	CBQTRACE(rmc_init, 'INIT', ifd->cutoff_);

	/*
	 * Initialize the CBQ's WRR state.
	 */
	for (i = 0; i < RM_MAXPRIO; i++) {
		ifd->alloc_[i] = 0;
		ifd->M_[i] = 0;
		ifd->num_[i] = 0;
		ifd->na_[i] = 0;
		ifd->active_[i] = NULL;
	}

	/*
	 * Initialize current packet state.
	 */
	ifd->qi_ = 0;
	ifd->qo_ = 0;
	for (i = 0; i < RM_MAXQUEUED; i++) {
		ifd->class_[i] = NULL;
		ifd->curlen_[i] = 0;
		ifd->borrowed_[i] = NULL;
	}

	/*
	 * Create the root class of the link-sharing structure.
	 */
	if ((ifd->root_ = rmc_newclass(0, ifd,
				       nsecPerByte,
				       rmc_root_overlimit, maxq, 0, 0,
				       maxidle, minidle, offtime,
				       0, 0)) == NULL) {
		printf("rmc_init: root class not allocated\n");
		return;
	}
	ifd->root_->depth_ = 0;
}

/*
 * int
 * rmc_queue_packet(struct rm_class *cl, mbuf_t *m) - Add packet given by
 *	mbuf 'm' to queue for resource class 'cl'.  This routine is called
 *	by a driver's if_output routine.  This routine must be called with
 *	output packet completion interrupts locked out (to avoid racing with
 *	rmc_dequeue_next).
 *
 *	Returns:	0 on successful queueing
 *			-1 when packet drop occurs
 */
int
rmc_queue_packet(struct rm_class *cl, mbuf_t *m)
{
	struct timeval	 now;
	struct rm_ifdat *ifd = cl->ifdat_;
	int		 cpri = cl->pri_;
	int		 is_empty = qempty(cl->q_);

	RM_GETTIME(now);
	if (ifd->cutoff_ > 0) {
		if (TV_LT(&cl->undertime_, &now)) {
			if (ifd->cutoff_ > cl->depth_)
				ifd->cutoff_ = cl->depth_;
			CBQTRACE(rmc_queue_packet, 'ffoc', cl->depth_);
		}
#if 1 /* ALTQ */
		else {
			/*
			 * the class is overlimit. if the class has
			 * underlimit ancestors, set cutoff to the lowest
			 * depth among them.
			 */
			struct rm_class *borrow = cl->borrow_;

			while (borrow != NULL &&
			       borrow->depth_ < ifd->cutoff_) {
				if (TV_LT(&borrow->undertime_, &now)) {
					ifd->cutoff_ = borrow->depth_;
					CBQTRACE(rmc_queue_packet, 'ffob', ifd->cutoff_);
					break;
				}
				borrow = borrow->borrow_;
			}
		}
#else /* !ALTQ */
		else if ((ifd->cutoff_ > 1) && cl->borrow_) {
			if (TV_LT(&cl->borrow_->undertime_, &now)) {
				ifd->cutoff_ = cl->borrow_->depth_;
				CBQTRACE(rmc_queue_packet, 'ffob',
					 cl->borrow_->depth_);
			}
		}
#endif /* !ALTQ */
	}

	if (_rmc_addq(cl, m) < 0)
		/* failed */
		return (-1);

	if (is_empty) {
		CBQTRACE(rmc_queue_packet, 'ytpe', cl->stats_.handle);
		ifd->na_[cpri]++;
	}

	if (qlen(cl->q_) > qlimit(cl->q_)) {
		/* note: qlimit can be set to 0 or 1 */
		rmc_drop_action(cl);
		return (-1);
	}
	return (0);
}

/*
 * void
 * rmc_tl_satisfied(struct rm_ifdat *ifd, struct timeval *now) - Check all
 *	classes to see if they are satisfied.
 */

static void
rmc_tl_satisfied(struct rm_ifdat *ifd, struct timeval *now)
{
	int		 i;
	rm_class_t	*p, *bp;

	for (i = RM_MAXPRIO - 1; i >= 0; i--) {
		if ((bp = ifd->active_[i]) != NULL) {
			p = bp;
			do {
				if (!rmc_satisfied(p, now)) {
					ifd->cutoff_ = p->depth_;
					return;
				}
				p = p->peer_;
			} while (p != bp);
		}
	}

	reset_cutoff(ifd);
}

/*
 * rmc_satisfied - Return 1 if the class is satisfied.  0, otherwise.
 */

static int
rmc_satisfied(struct rm_class *cl, struct timeval *now)
{
	rm_class_t	*p;

	if (cl == NULL)
		return (1);
	if (TV_LT(now, &cl->undertime_))
		return (1);
	if (cl->depth_ == 0) {
		if (!cl->sleeping_ && (qlen(cl->q_) > cl->qthresh_))
			return (0);
		else
			return (1);
	}
	if (cl->children_ != NULL) {
		p = cl->children_;
		while (p != NULL) {
			if (!rmc_satisfied(p, now))
				return (0);
			p = p->next_;
		}
	}

	return (1);
}

/*
 * Return 1 if class 'cl' is under limit or can borrow from a parent,
 * 0 if overlimit.  As a side-effect, this routine will invoke the
 * class overlimit action if the class is overlimit.
 */

static int
rmc_under_limit(struct rm_class *cl, struct timeval *now)
{
	rm_class_t	*p = cl;
	rm_class_t	*top;
	struct rm_ifdat	*ifd = cl->ifdat_;

	ifd->borrowed_[ifd->qi_] = NULL;
	/*
	 * If cl is the root class, then always return that it is
	 * underlimit.  Otherwise, check to see if the class is underlimit.
	 */
	if (cl->parent_ == NULL)
		return (1);

	if (cl->sleeping_) {
		if (TV_LT(now, &cl->undertime_))
			return (0);

		CALLOUT_STOP(&cl->callout_);
		cl->sleeping_ = 0;
		cl->undertime_.tv_sec = 0;
		return (1);
	}

	top = NULL;
	while (cl->undertime_.tv_sec && TV_LT(now, &cl->undertime_)) {
		if (((cl = cl->borrow_) == NULL) ||
		    (cl->depth_ > ifd->cutoff_)) {
#ifdef ADJUST_CUTOFF
			if (cl != NULL)
				/* cutoff is taking effect, just
				   return false without calling
				   the delay action. */
				return (0);
#endif
#ifdef BORROW_OFFTIME
			/*
			 * check if the class can borrow offtime too.
			 * borrow offtime from the top of the borrow
			 * chain if the top class is not overloaded.
			 */
			if (cl != NULL) {
				/* cutoff is taking effect, use this class as top. */
				top = cl;
				CBQTRACE(rmc_under_limit, 'ffou', ifd->cutoff_);
			}
			if (top != NULL && top->avgidle_ == top->minidle_)
				top = NULL;
			p->overtime_ = *now;
			(p->overlimit)(p, top);
#else
			p->overtime_ = *now;
			(p->overlimit)(p, NULL);
#endif
			return (0);
		}
		top = cl;
	}

	if (cl != p)
		ifd->borrowed_[ifd->qi_] = cl;
	return (1);
}
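
/*
 * Note (added): when the loop above succeeds by borrowing (cl != p),
 * the lending ancestor is recorded in ifd->borrowed_[ifd->qi_];
 * rmc_update_class_util() later uses that entry to count the borrow
 * and to decide whether cutoff_ should move to the lender's depth.
 */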

/*
 * _rmc_wrr_dequeue_next() - This is the scheduler for WRR as opposed to
 *	packet-by-packet round robin.
 *
 * The heart of the weighted round-robin scheduler, which decides which
 * class next gets to send a packet.  Highest priority first, then
 * weighted round-robin within priorities.
 *
 * Each able-to-send class gets to send until its byte allocation is
 * exhausted.  Thus, the active pointer is only changed after a class has
 * exhausted its allocation.
 *
 * If the scheduler finds no class that is underlimit or able to borrow,
 * then the first class found that had a nonzero queue and is allowed to
 * borrow gets to send.
 */

static mbuf_t *
_rmc_wrr_dequeue_next(struct rm_ifdat *ifd, int op)
{
	struct rm_class	*cl = NULL, *first = NULL;
	u_int		 deficit;
	int		 cpri;
	mbuf_t		*m;
	struct timeval	 now;

	RM_GETTIME(now);

	/*
	 * if the driver polls the top of the queue and then removes
	 * the polled packet, we must return the same packet.
	 */
	if (op == ALTDQ_REMOVE && ifd->pollcache_) {
		cl = ifd->pollcache_;
		cpri = cl->pri_;
		if (ifd->efficient_) {
			/* check if this class is overlimit */
			if (cl->undertime_.tv_sec != 0 &&
			    rmc_under_limit(cl, &now) == 0)
				first = cl;
		}
		ifd->pollcache_ = NULL;
		goto _wrr_out;
	} else {
		/* mode == ALTDQ_POLL || pollcache == NULL */
		ifd->pollcache_ = NULL;
		ifd->borrowed_[ifd->qi_] = NULL;
	}
#ifdef ADJUST_CUTOFF
 _again:
#endif
	for (cpri = RM_MAXPRIO - 1; cpri >= 0; cpri--) {
		if (ifd->na_[cpri] == 0)
			continue;
		deficit = 0;
		/*
		 * Loop through twice for a priority level, if some class
		 * was unable to send a packet the first round because
		 * of the weighted round-robin mechanism.
		 * During the second loop at this level, deficit==2.
		 * (This second loop is not needed if for every class,
		 * "M[cl->pri_]" times "cl->allotment" is greater than
		 * the byte size for the largest packet in the class.)
		 */
 _wrr_loop:
		cl = ifd->active_[cpri];
		ASSERT(cl != NULL);
		do {
			if ((deficit < 2) && (cl->bytes_alloc_ <= 0))
				cl->bytes_alloc_ += cl->w_allotment_;
			if (!qempty(cl->q_)) {
				if ((cl->undertime_.tv_sec == 0) ||
				    rmc_under_limit(cl, &now)) {
					if (cl->bytes_alloc_ > 0 || deficit > 1)
						goto _wrr_out;

					/* underlimit but no alloc */
					deficit = 1;
#if 1
					ifd->borrowed_[ifd->qi_] = NULL;
#endif
				} else if (first == NULL && cl->borrow_ != NULL)
					first = cl; /* borrowing candidate */
			}

			cl->bytes_alloc_ = 0;
			cl = cl->peer_;
		} while (cl != ifd->active_[cpri]);

		if (deficit == 1) {
			/* first loop found an underlimit class with deficit */
			/* Loop on same priority level, with new deficit.  */
			deficit = 2;
			goto _wrr_loop;
		}
	}

#ifdef ADJUST_CUTOFF
	/*
	 * no underlimit class found.  if cutoff is taking effect,
	 * increase cutoff and try again.
	 */
	if (first != NULL && ifd->cutoff_ < ifd->root_->depth_) {
		ifd->cutoff_++;
		CBQTRACE(_rmc_wrr_dequeue_next, 'ojda', ifd->cutoff_);
		goto _again;
	}
#endif /* ADJUST_CUTOFF */
	/*
	 * If LINK_EFFICIENCY is turned on, then the first overlimit
	 * class we encounter will send a packet if all the classes
	 * of the link-sharing structure are overlimit.
	 */
	reset_cutoff(ifd);
	CBQTRACE(_rmc_wrr_dequeue_next, 'otsr', ifd->cutoff_);

	if (!ifd->efficient_ || first == NULL)
		return (NULL);

	cl = first;
	cpri = cl->pri_;
#if 0	/* too time-consuming for nothing */
	if (cl->sleeping_)
		CALLOUT_STOP(&cl->callout_);
	cl->sleeping_ = 0;
	cl->undertime_.tv_sec = 0;
#endif
	ifd->borrowed_[ifd->qi_] = cl->borrow_;
	ifd->cutoff_ = cl->borrow_->depth_;

	/*
	 * Dequeue the packet and do the bookkeeping...
	 */
 _wrr_out:
	if (op == ALTDQ_REMOVE) {
		m = _rmc_getq(cl);
		if (m == NULL)
			panic("_rmc_wrr_dequeue_next");
		if (qempty(cl->q_))
			ifd->na_[cpri]--;

		/*
		 * Update class statistics and link data.
		 */
		if (cl->bytes_alloc_ > 0)
			cl->bytes_alloc_ -= m_pktlen(m);

		if ((cl->bytes_alloc_ <= 0) || first == cl)
			ifd->active_[cl->pri_] = cl->peer_;
		else
			ifd->active_[cl->pri_] = cl;

		ifd->class_[ifd->qi_] = cl;
		ifd->curlen_[ifd->qi_] = m_pktlen(m);
		ifd->now_[ifd->qi_] = now;
		ifd->qi_ = (ifd->qi_ + 1) % ifd->maxqueued_;
		ifd->queued_++;
	} else {
		/* mode == ALTDQ_POLL */
		m = _rmc_pollq(cl);
		ifd->pollcache_ = cl;
	}
	return (m);
}

/*
 * Dequeue & return next packet from the highest priority class that
 * has a packet to send & has enough allocation to send it.  This
 * routine is called by a driver whenever it needs a new packet to
 * output.
 */
static mbuf_t *
_rmc_prr_dequeue_next(struct rm_ifdat *ifd, int op)
{
	mbuf_t		*m;
	int		 cpri;
	struct rm_class	*cl, *first = NULL;
	struct timeval	 now;

	RM_GETTIME(now);

	/*
	 * if the driver polls the top of the queue and then removes
	 * the polled packet, we must return the same packet.
	 */
	if (op == ALTDQ_REMOVE && ifd->pollcache_) {
		cl = ifd->pollcache_;
		cpri = cl->pri_;
		ifd->pollcache_ = NULL;
		goto _prr_out;
	} else {
		/* mode == ALTDQ_POLL || pollcache == NULL */
		ifd->pollcache_ = NULL;
		ifd->borrowed_[ifd->qi_] = NULL;
	}
#ifdef ADJUST_CUTOFF
 _again:
#endif
	for (cpri = RM_MAXPRIO - 1; cpri >= 0; cpri--) {
		if (ifd->na_[cpri] == 0)
			continue;
		cl = ifd->active_[cpri];
		ASSERT(cl != NULL);
		do {
			if (!qempty(cl->q_)) {
				if ((cl->undertime_.tv_sec == 0) ||
				    rmc_under_limit(cl, &now))
					goto _prr_out;
				if (first == NULL && cl->borrow_ != NULL)
					first = cl;
			}
			cl = cl->peer_;
		} while (cl != ifd->active_[cpri]);
	}

#ifdef ADJUST_CUTOFF
	/*
	 * no underlimit class found.  if cutoff is taking effect, increase
	 * cutoff and try again.
	 */
	if (first != NULL && ifd->cutoff_ < ifd->root_->depth_) {
		ifd->cutoff_++;
		goto _again;
	}
#endif /* ADJUST_CUTOFF */
	/*
	 * If LINK_EFFICIENCY is turned on, then the first overlimit
	 * class we encounter will send a packet if all the classes
	 * of the link-sharing structure are overlimit.
	 */
	reset_cutoff(ifd);
	if (!ifd->efficient_ || first == NULL)
		return (NULL);

	cl = first;
	cpri = cl->pri_;
#if 0	/* too time-consuming for nothing */
	if (cl->sleeping_)
		CALLOUT_STOP(&cl->callout_);
	cl->sleeping_ = 0;
	cl->undertime_.tv_sec = 0;
#endif
	ifd->borrowed_[ifd->qi_] = cl->borrow_;
	ifd->cutoff_ = cl->borrow_->depth_;

	/*
	 * Dequeue the packet and do the bookkeeping...
	 */
 _prr_out:
	if (op == ALTDQ_REMOVE) {
		m = _rmc_getq(cl);
		if (m == NULL)
			panic("_rmc_prr_dequeue_next");
		if (qempty(cl->q_))
			ifd->na_[cpri]--;

		ifd->active_[cpri] = cl->peer_;

		ifd->class_[ifd->qi_] = cl;
		ifd->curlen_[ifd->qi_] = m_pktlen(m);
		ifd->now_[ifd->qi_] = now;
		ifd->qi_ = (ifd->qi_ + 1) % ifd->maxqueued_;
		ifd->queued_++;
	} else {
		/* mode == ALTDQ_POLL */
		m = _rmc_pollq(cl);
		ifd->pollcache_ = cl;
	}
	return (m);
}

/*
 * mbuf_t *
 * rmc_dequeue_next(struct rm_ifdat *ifd, int mode) - this function
 *	is invoked by the packet driver to get the next packet to be
 *	dequeued and output on the link.  If WRR is enabled, then the
 *	WRR dequeue next routine will determine the next packet to be sent.
 *	Otherwise, packet-by-packet round robin is invoked.
 *
 *	Returns:	NULL, if a packet is not available or if all
 *			classes are overlimit.
 *
 *			Otherwise, a pointer to the next packet.
 */

mbuf_t *
rmc_dequeue_next(struct rm_ifdat *ifd, int mode)
{
	if (ifd->queued_ >= ifd->maxqueued_)
		return (NULL);
	else if (ifd->wrr_)
		return (_rmc_wrr_dequeue_next(ifd, mode));
	else
		return (_rmc_prr_dequeue_next(ifd, mode));
}
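
/*
 * Sketch (added; not part of the original code): the calling pattern
 * expected from a driver's start routine, with the hardware handoff
 * elided.  Kept under "notdef" so it is never compiled.
 */
#ifdef notdef
static void
example_driver_start(struct rm_ifdat *ifd)
{
	mbuf_t	*m;

	while ((m = rmc_dequeue_next(ifd, ALTDQ_REMOVE)) != NULL) {
		/* ... hand 'm' to the hardware here ... */

		/* on (estimated) transmit completion: */
		rmc_update_class_util(ifd);
	}
}
#endif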

/*
 * Update the utilization estimate for the packet that just completed.
 * The packet's class & the parent(s) of that class all get their
 * estimators updated.  This routine is called by the driver's output-
 * packet-completion interrupt service routine.
 */

/*
 * a macro to approximate "divide by 1000"; it actually multiplies by
 * about 0.000999 when a value has enough effective digits.
 * (on pentium, mul takes 9 cycles but div takes 46!)
 */
#define	NSEC_TO_USEC(t)	(((t) >> 10) + ((t) >> 16) + ((t) >> 17))
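/*
 * Example (added): for t = 1000000 ns,
 * (t >> 10) + (t >> 16) + (t >> 17) = 976 + 15 + 7 = 998 usec
 * (exact answer 1000), since 1/1024 + 1/65536 + 1/131072 =~ 0.000999.
 */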
void
rmc_update_class_util(struct rm_ifdat *ifd)
{
	int		 idle, avgidle, pktlen;
	int		 pkt_time, tidle;
	rm_class_t	*cl, *borrowed;
	rm_class_t	*borrows;
	struct timeval	*nowp;

	/*
	 * Get the most recent completed class.
	 */
	if ((cl = ifd->class_[ifd->qo_]) == NULL)
		return;

	pktlen = ifd->curlen_[ifd->qo_];
	borrowed = ifd->borrowed_[ifd->qo_];
	borrows = borrowed;

	PKTCNTR_ADD(&cl->stats_.xmit_cnt, pktlen);

	/*
	 * Run estimator on class and its ancestors.
	 */
	/*
	 * rm_update_class_util is designed to be called when the
	 * transfer is completed from a xmit complete interrupt,
	 * but most drivers don't implement an upcall for that.
	 * so, just use estimated completion time.
	 * as a result, ifd->qi_ and ifd->qo_ are always synced.
	 */
	nowp = &ifd->now_[ifd->qo_];
	/* get pkt_time (for link) in usec */
#if 1  /* use approximation */
	pkt_time = ifd->curlen_[ifd->qo_] * ifd->ns_per_byte_;
	pkt_time = NSEC_TO_USEC(pkt_time);
#else
	pkt_time = ifd->curlen_[ifd->qo_] * ifd->ns_per_byte_ / 1000;
#endif
#if 1 /* ALTQ4PPP */
	if (TV_LT(nowp, &ifd->ifnow_)) {
		int iftime;

		/*
		 * make sure the estimated completion time does not go
		 * too far.  it can happen when the link layer supports
		 * data compression or the interface speed is set to
		 * a much lower value.
		 */
		TV_DELTA(&ifd->ifnow_, nowp, iftime);
		if (iftime+pkt_time < ifd->maxiftime_) {
			TV_ADD_DELTA(&ifd->ifnow_, pkt_time, &ifd->ifnow_);
		} else {
			TV_ADD_DELTA(nowp, ifd->maxiftime_, &ifd->ifnow_);
		}
	} else {
		TV_ADD_DELTA(nowp, pkt_time, &ifd->ifnow_);
	}
#else
	if (TV_LT(nowp, &ifd->ifnow_)) {
		TV_ADD_DELTA(&ifd->ifnow_, pkt_time, &ifd->ifnow_);
	} else {
		TV_ADD_DELTA(nowp, pkt_time, &ifd->ifnow_);
	}
#endif

	while (cl != NULL) {
		TV_DELTA(&ifd->ifnow_, &cl->last_, idle);
		if (idle >= 2000000)
			/*
			 * this class is idle enough, reset avgidle.
			 * (TV_DELTA returns 2000000 us when delta is large.)
			 */
			cl->avgidle_ = cl->maxidle_;

		/* get pkt_time (for class) in usec */
#if 1  /* use approximation */
		pkt_time = pktlen * cl->ns_per_byte_;
		pkt_time = NSEC_TO_USEC(pkt_time);
#else
		pkt_time = pktlen * cl->ns_per_byte_ / 1000;
#endif
		idle -= pkt_time;

		avgidle = cl->avgidle_;
		avgidle += idle - (avgidle >> RM_FILTER_GAIN);
		cl->avgidle_ = avgidle;

		/* Are we overlimit ? */
		if (avgidle <= 0) {
			CBQTRACE(rmc_update_class_util, 'milo', cl->stats_.handle);
#if 1 /* ALTQ */
			/*
			 * need some lower bound for avgidle, otherwise
			 * a borrowing class gets unbounded penalty.
			 */
			if (avgidle < cl->minidle_)
				avgidle = cl->avgidle_ = cl->minidle_;
#endif
			/* set next idle to make avgidle 0 */
			tidle = pkt_time +
				(((1 - RM_POWER) * avgidle) >> RM_FILTER_GAIN);
			TV_ADD_DELTA(nowp, tidle, &cl->undertime_);
			++cl->stats_.over;
		} else {
			cl->avgidle_ =
			    (avgidle > cl->maxidle_) ? cl->maxidle_ : avgidle;
			cl->undertime_.tv_sec = 0;
			if (cl->sleeping_) {
				CALLOUT_STOP(&cl->callout_);
				cl->sleeping_ = 0;
			}
		}

		if (borrows != NULL) {
			if (borrows != cl)
				++cl->stats_.borrows;
			else
				borrows = NULL;
		}
		cl->last_ = ifd->ifnow_;
		cl->last_pkttime_ = pkt_time;

#if 1
		if (cl->parent_ == NULL) {
			/* take stats of root class */
			PKTCNTR_ADD(&cl->stats_.xmit_cnt, pktlen);
		}
#endif

		cl = cl->parent_;
	}

	/*
	 * Check to see if cutoff needs to be set to a new level.
	 */
	cl = ifd->class_[ifd->qo_];
	if (borrowed && (ifd->cutoff_ >= borrowed->depth_)) {
#if 1 /* ALTQ */
		if ((qlen(cl->q_) <= 0) || TV_LT(nowp, &borrowed->undertime_)) {
			rmc_tl_satisfied(ifd, nowp);
			CBQTRACE(rmc_update_class_util, 'broe', ifd->cutoff_);
		} else {
			ifd->cutoff_ = borrowed->depth_;
			CBQTRACE(rmc_update_class_util, 'ffob', borrowed->depth_);
		}
#else /* !ALTQ */
		if ((qlen(cl->q_) <= 1) || TV_LT(&now, &borrowed->undertime_)) {
			reset_cutoff(ifd);
#ifdef notdef
			rmc_tl_satisfied(ifd, &now);
#endif
			CBQTRACE(rmc_update_class_util, 'broe', ifd->cutoff_);
		} else {
			ifd->cutoff_ = borrowed->depth_;
			CBQTRACE(rmc_update_class_util, 'ffob', borrowed->depth_);
		}
#endif /* !ALTQ */
	}

	/*
	 * Release class slot
	 */
	ifd->borrowed_[ifd->qo_] = NULL;
	ifd->class_[ifd->qo_] = NULL;
	ifd->qo_ = (ifd->qo_ + 1) % ifd->maxqueued_;
	ifd->queued_--;
}

/*
 * void
 * rmc_drop_action(struct rm_class *cl) - Generic (not protocol-specific)
 *	over-limit action routines.  These get invoked by rmc_under_limit()
 *	if a class with packets to send is over its bandwidth limit & can't
 *	borrow from a parent class.
 *
 *	Returns: NONE
 */

static void
rmc_drop_action(struct rm_class *cl)
{
	struct rm_ifdat	*ifd = cl->ifdat_;

	ASSERT(qlen(cl->q_) > 0);
	_rmc_dropq(cl);
	if (qempty(cl->q_))
		ifd->na_[cl->pri_]--;
}

void
rmc_dropall(struct rm_class *cl)
{
	struct rm_ifdat	*ifd = cl->ifdat_;

	if (!qempty(cl->q_)) {
		_flushq(cl->q_);

		ifd->na_[cl->pri_]--;
	}
}

#if (__FreeBSD_version > 300000)
/* hzto() was removed from FreeBSD 3.0 */
static int hzto(struct timeval *);

static int
hzto(struct timeval *tv)
{
	struct timeval t2;

	getmicrotime(&t2);
	t2.tv_sec = tv->tv_sec - t2.tv_sec;
	t2.tv_usec = tv->tv_usec - t2.tv_usec;
	return (tvtohz(&t2));
}
#endif /* __FreeBSD_version > 300000 */

/*
 * void
 * rmc_delay_action(struct rm_class *cl, struct rm_class *borrow) - This
 *	function is the generic CBQ delay action routine.  It is invoked
 *	via rmc_under_limit when the packet is discovered to be overlimit.
 *
 *	If the delay action is the result of the borrow class being
 *	overlimit, then delay for the offtime of the borrowing class that
 *	is overlimit.
 *
 *	Returns: NONE
 */

void
rmc_delay_action(struct rm_class *cl, struct rm_class *borrow)
{
	int	delay, t, extradelay;

	cl->stats_.overactions++;
	TV_DELTA(&cl->undertime_, &cl->overtime_, delay);
#ifndef BORROW_OFFTIME
	delay += cl->offtime_;
#endif

	if (!cl->sleeping_) {
		CBQTRACE(rmc_delay_action, 'yled', cl->stats_.handle);
#ifdef BORROW_OFFTIME
		if (borrow != NULL)
			extradelay = borrow->offtime_;
		else
#endif
			extradelay = cl->offtime_;

#ifdef ALTQ
		/*
		 * XXX recalculate suspend time:
		 * current undertime is (tidle + pkt_time) calculated
		 * from the last transmission.
		 *	tidle: time required to bring avgidle back to 0
		 *	pkt_time: target waiting time for this class
		 * we need to replace pkt_time by offtime
		 */
		extradelay -= cl->last_pkttime_;
#endif
		if (extradelay > 0) {
			TV_ADD_DELTA(&cl->undertime_, extradelay, &cl->undertime_);
			delay += extradelay;
		}

		cl->sleeping_ = 1;
		cl->stats_.delays++;

		/*
		 * Since packets are phased randomly with respect to the
		 * clock, 1 tick (the next clock tick) can be an arbitrarily
		 * short time so we have to wait for at least two ticks.
		 * NOTE:  If there's no other traffic, we need the timer as
		 * a 'backstop' to restart this class.
		 */
		if (delay > tick * 2) {
#ifdef __FreeBSD__
			/* FreeBSD rounds up the tick */
			t = hzto(&cl->undertime_);
#else
			/* other BSDs round down the tick */
			t = hzto(&cl->undertime_) + 1;
#endif
		} else
			t = 2;
		CALLOUT_RESET(&cl->callout_, t,
			      (timeout_t *)rmc_restart, (caddr_t)cl);
	}
}

/*
 * void
 * rmc_restart() - is just a helper routine for rmc_delay_action -- it is
 *	called by the system timer code & is responsible for checking if the
 *	class is still sleeping (it might have been restarted as a side
 *	effect of the queue scan on a packet arrival) and, if so, restarting
 *	output for the class.  Inspecting the class state & restarting output
 *	require locking the class structure.  In general the driver is
 *	responsible for locking but this is the only routine that is not
 *	called directly or indirectly from the interface driver so it has
 *	to know about system locking conventions.  Under bsd, locking is
 *	done by raising IPL to splimp so that's what's implemented here.  On
 *	a different system this would probably need to be changed.
 *
 *	Returns:	NONE
 */

static void
rmc_restart(struct rm_class *cl)
{
	struct rm_ifdat	*ifd = cl->ifdat_;
	int		 s;

#ifdef __NetBSD__
	s = splnet();
#else
	s = splimp();
#endif
	IFQ_LOCK(ifd->ifq_);
	if (cl->sleeping_) {
		cl->sleeping_ = 0;
		cl->undertime_.tv_sec = 0;

		if (ifd->queued_ < ifd->maxqueued_ && ifd->restart != NULL) {
			CBQTRACE(rmc_restart, 'trts', cl->stats_.handle);
			(ifd->restart)(ifd->ifq_);
		}
	}
	IFQ_UNLOCK(ifd->ifq_);
	splx(s);
}

/*
 * void
 * rmc_root_overlimit(struct rm_class *cl, struct rm_class *borrow) - This
 *	is the generic overlimit handling routine for the root class of the
 *	link sharing structure.
 *
 *	Returns: NONE
 */

static void
rmc_root_overlimit(struct rm_class *cl, struct rm_class *borrow)
{
	panic("rmc_root_overlimit");
}

/*
 * Packet Queue handling routines.  Eventually, this is to localize the
 *	effects on the code of whether queues are RED queues or droptail
 *	queues.
 */

static int
_rmc_addq(rm_class_t *cl, mbuf_t *m)
{
#ifdef ALTQ_RIO
	if (q_is_rio(cl->q_))
		return rio_addq((rio_t *)cl->red_, cl->q_, m, cl->pktattr_);
#endif
#ifdef ALTQ_RED
	if (q_is_red(cl->q_))
		return red_addq(cl->red_, cl->q_, m, cl->pktattr_);
#endif /* ALTQ_RED */

	if (cl->flags_ & RMCF_CLEARDSCP)
		write_dsfield(m, cl->pktattr_, 0);

	_addq(cl->q_, m);
	return (0);
}

/* note: _rmc_dropq is not called for red */
static void
_rmc_dropq(rm_class_t *cl)
{
	mbuf_t	*m;

	if ((m = _getq(cl->q_)) != NULL)
		m_freem(m);
}

static mbuf_t *
_rmc_getq(rm_class_t *cl)
{
#ifdef ALTQ_RIO
	if (q_is_rio(cl->q_))
		return rio_getq((rio_t *)cl->red_, cl->q_);
#endif
#ifdef ALTQ_RED
	if (q_is_red(cl->q_))
		return red_getq(cl->red_, cl->q_);
#endif
	return _getq(cl->q_);
}

static mbuf_t *
_rmc_pollq(rm_class_t *cl)
{
	return qhead(cl->q_);
}

#ifdef CBQ_TRACE

struct cbqtrace		 cbqtrace_buffer[NCBQTRACE+1];
struct cbqtrace		*cbqtrace_ptr = NULL;
int			 cbqtrace_count;

/*
 * DDB hook to trace cbq events:
 *  the last 1024 events are held in a circular buffer.
 *  use "call cbqtrace_dump(N)" to display 20 events from Nth event.
 */
void cbqtrace_dump(int);
static char *rmc_funcname(void *);

static struct rmc_funcs {
	void	*func;
	char	*name;
} rmc_funcs[] =
{
	{ rmc_init,		"rmc_init" },
	{ rmc_queue_packet,	"rmc_queue_packet" },
	{ rmc_under_limit,	"rmc_under_limit" },
	{ rmc_update_class_util, "rmc_update_class_util" },
	{ rmc_delay_action,	"rmc_delay_action" },
	{ rmc_restart,		"rmc_restart" },
	{ _rmc_wrr_dequeue_next, "_rmc_wrr_dequeue_next" },
	{ NULL,			NULL }
};

static char *
rmc_funcname(void *func)
{
	struct rmc_funcs *fp;

	for (fp = rmc_funcs; fp->func != NULL; fp++)
		if (fp->func == func)
			return (fp->name);
	return ("unknown");
}

void
cbqtrace_dump(int counter)
{
	int	 i, *p;
	char	*cp;

	counter = counter % NCBQTRACE;
	p = (int *)&cbqtrace_buffer[counter];

	for (i=0; i<20; i++) {
		printf("[0x%x] ", *p++);
		printf("%s: ", rmc_funcname((void *)*p++));
		cp = (char *)p++;
		printf("%c%c%c%c: ", cp[0], cp[1], cp[2], cp[3]);
		printf("%d\n",*p++);

		if (p >= (int *)&cbqtrace_buffer[NCBQTRACE])
			p = (int *)cbqtrace_buffer;
	}
}
#endif /* CBQ_TRACE */
#endif /* ALTQ_CBQ */

#if defined(ALTQ_CBQ) || defined(ALTQ_RED) || defined(ALTQ_RIO) || defined(ALTQ_HFSC) || defined(ALTQ_PRIQ)
#if !defined(__GNUC__) || defined(ALTQ_DEBUG)

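/*
 * Note (added): these routines keep each class queue as a circular,
 * singly-linked list of mbufs chained through m_nextpkt.  qtail(q)
 * points at the last packet, and the tail's m_nextpkt points back at
 * the head, so both ends are reachable from the single tail pointer.
 */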
void
_addq(class_queue_t *q, mbuf_t *m)
{
	mbuf_t	*m0;

	if ((m0 = qtail(q)) != NULL)
		m->m_nextpkt = m0->m_nextpkt;
	else
		m0 = m;
	m0->m_nextpkt = m;
	qtail(q) = m;
	qlen(q)++;
}

mbuf_t *
_getq(class_queue_t *q)
{
	mbuf_t	*m, *m0;

	if ((m = qtail(q)) == NULL)
		return (NULL);
	if ((m0 = m->m_nextpkt) != m)
		m->m_nextpkt = m0->m_nextpkt;
	else {
		ASSERT(qlen(q) == 1);
		qtail(q) = NULL;
	}
	qlen(q)--;
	m0->m_nextpkt = NULL;
	return (m0);
}

/* drop a packet at the tail of the queue */
mbuf_t *
_getq_tail(class_queue_t *q)
{
	mbuf_t	*m, *m0, *prev;

	if ((m = m0 = qtail(q)) == NULL)
		return NULL;
	do {
		prev = m0;
		m0 = m0->m_nextpkt;
	} while (m0 != m);
	prev->m_nextpkt = m->m_nextpkt;
	if (prev == m) {
		ASSERT(qlen(q) == 1);
		qtail(q) = NULL;
	} else
		qtail(q) = prev;
	qlen(q)--;
	m->m_nextpkt = NULL;
	return (m);
}

/* randomly select a packet in the queue */
mbuf_t *
_getq_random(class_queue_t *q)
{
	struct mbuf	*m;
	int		 i, n;

	if ((m = qtail(q)) == NULL)
		return NULL;
	if (m->m_nextpkt == m) {
		ASSERT(qlen(q) == 1);
		qtail(q) = NULL;
	} else {
		struct mbuf *prev = NULL;

		n = arc4random() % qlen(q) + 1;
		for (i = 0; i < n; i++) {
			prev = m;
			m = m->m_nextpkt;
		}
		prev->m_nextpkt = m->m_nextpkt;
		if (m == qtail(q))
			qtail(q) = prev;
	}
	qlen(q)--;
	m->m_nextpkt = NULL;
	return (m);
}

void
_removeq(class_queue_t *q, mbuf_t *m)
{
	mbuf_t	*m0, *prev;

	m0 = qtail(q);
	do {
		prev = m0;
		m0 = m0->m_nextpkt;
	} while (m0 != m);
	prev->m_nextpkt = m->m_nextpkt;
	if (prev == m)
		qtail(q) = NULL;
	else if (qtail(q) == m)
		qtail(q) = prev;
	qlen(q)--;
}

void
_flushq(class_queue_t *q)
{
	mbuf_t *m;

	while ((m = _getq(q)) != NULL)
		m_freem(m);
	ASSERT(qlen(q) == 0);
}

#endif /* !__GNUC__ || ALTQ_DEBUG */
#endif /* ALTQ_CBQ || ALTQ_RED || ALTQ_RIO || ALTQ_HFSC || ALTQ_PRIQ */