/*	$FreeBSD: head/sys/contrib/altq/altq/altq_rmclass.c 130368 2004-06-12 00:57:20Z mlaier $	*/
/*	$KAME: altq_rmclass.c,v 1.18 2003/11/06 06:32:53 kjc Exp $	*/

/*
 * Copyright (c) 1991-1997 Regents of the University of California.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *      This product includes software developed by the Network Research
 *      Group at Lawrence Berkeley Laboratory.
 * 4. Neither the name of the University nor of the Laboratory may be used
 *    to endorse or promote products derived from this software without
 *    specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * LBL code modified by speer@eng.sun.com, May 1997.
 * For questions and/or comments, please send mail to cbq@ee.lbl.gov
 */

#ident "@(#)rm_class.c  1.48     97/12/05 SMI"

#if defined(__FreeBSD__) || defined(__NetBSD__)
#include "opt_altq.h"
#if (__FreeBSD__ != 2)
#include "opt_inet.h"
#ifdef __FreeBSD__
#include "opt_inet6.h"
#endif
#endif
#endif /* __FreeBSD__ || __NetBSD__ */
#ifdef ALTQ_CBQ	/* cbq is enabled by ALTQ_CBQ option in opt_altq.h */

#include <sys/param.h>
#include <sys/malloc.h>
#include <sys/mbuf.h>
#include <sys/socket.h>
#include <sys/systm.h>
#include <sys/errno.h>
#include <sys/time.h>
#ifdef ALTQ3_COMPAT
#include <sys/kernel.h>
#endif

#include <net/if.h>
#ifdef ALTQ3_COMPAT
#include <netinet/in.h>
#include <netinet/in_systm.h>
#include <netinet/ip.h>
#endif

#include <altq/altq.h>
#include <altq/altq_rmclass.h>
#include <altq/altq_rmclass_debug.h>
#include <altq/altq_red.h>
#include <altq/altq_rio.h>

/*
 * Local Macros
 */

#define	reset_cutoff(ifd)	{ ifd->cutoff_ = RM_MAXDEPTH; }

/*
 * Local routines.
 */

static int	rmc_satisfied(struct rm_class *, struct timeval *);
static void	rmc_wrr_set_weights(struct rm_ifdat *);
static void	rmc_depth_compute(struct rm_class *);
static void	rmc_depth_recompute(rm_class_t *);

static mbuf_t	*_rmc_wrr_dequeue_next(struct rm_ifdat *, int);
static mbuf_t	*_rmc_prr_dequeue_next(struct rm_ifdat *, int);

static int	_rmc_addq(rm_class_t *, mbuf_t *);
static void	_rmc_dropq(rm_class_t *);
static mbuf_t	*_rmc_getq(rm_class_t *);
static mbuf_t	*_rmc_pollq(rm_class_t *);

static int	rmc_under_limit(struct rm_class *, struct timeval *);
static void	rmc_tl_satisfied(struct rm_ifdat *, struct timeval *);
static void	rmc_drop_action(struct rm_class *);
static void	rmc_restart(struct rm_class *);
static void	rmc_root_overlimit(struct rm_class *, struct rm_class *);
#define	BORROW_OFFTIME
/*
 * BORROW_OFFTIME (experimental):
 * borrow the offtime of the class it is borrowing from.
 * the reason is that when its own offtime is set, the class is unable
 * to borrow much, especially when cutoff is taking effect.
 * but when the borrowed class is overloaded (avgidle is close to minidle),
 * use the borrowing class's offtime to avoid overload.
 */
#define	ADJUST_CUTOFF
/*
 * ADJUST_CUTOFF (experimental):
 * if no underlimit class is found due to cutoff, increase cutoff and
 * retry the scheduling loop.
 * also, don't invoke delay_actions while cutoff is taking effect,
 * since a sleeping class won't have a chance to be scheduled in the
 * next loop.
 *
 * now the heuristics for setting the top-level variable (cutoff_) become:
 *	1. if a packet arrives for a not-overlimit class, set cutoff
 *	   to the depth of the class.
 *	2. if cutoff is i, and a packet arrives for an overlimit class
 *	   with an underlimit ancestor at a lower level than i (say j),
 *	   then set cutoff to j.
 *	3. at scheduling a packet, if there is no underlimit class
 *	   due to the current cutoff level, increase cutoff by 1 and
 *	   then try to schedule again.
 */
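/*
 * Worked example of the heuristics above (illustrative, not from the
 * original source): consider a root class at depth 1 with two leaf
 * children A and B at depth 0.  A packet arriving for an underlimit A
 * sets cutoff_ to 0 (rule 1), so an overlimit B may not borrow from
 * the root for now.  When the scheduler later finds no underlimit
 * class at cutoff 0, it raises cutoff_ to 1 (rule 3) and B can borrow
 * from the root, provided the root itself is underlimit.
 */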

/*
 * rm_class_t *
 * rmc_newclass(...) - Create a new resource management class at priority
 * 'pri' on the interface given by 'ifd'.
 *
 * nsecPerByte  is the data rate of the interface in nanoseconds/byte.
 *              E.g., 800 for a 10Mb/s ethernet.  If the class gets less
 *              than 100% of the bandwidth, this number should be the
 *              'effective' rate for the class.  Let f be the
 *              bandwidth fraction allocated to this class, and let
 *              nsPerByte be the data rate of the output link in
 *              nanoseconds/byte.  Then nsecPerByte is set to
 *              nsPerByte / f.  E.g., 1600 (= 800 / .5)
 *              for a class that gets 50% of an ethernet's bandwidth.
 *
 * action       the routine to call when the class is over limit.
 *
 * maxq         max allowable queue size for class (in packets).
 *
 * parent       parent class pointer.
 *
 * borrow       class to borrow from (should be either 'parent' or null).
 *
 * maxidle      max value allowed for class 'idle' time estimate (this
 *              parameter determines how large an initial burst of packets
 *              can be before the overlimit action is invoked).
 *
 * offtime      how long the 'delay' action will delay when the class goes
 *              over its limit (this parameter determines the steady-state
 *              burst size when a class is running over its limit).
 *
 * Maxidle and offtime have to be computed from the following:  If the
 * average packet size is s, the bandwidth fraction allocated to this
 * class is f, we want to allow b packet bursts, and the gain of the
 * averaging filter is g (= 1 - 2^(-RM_FILTER_GAIN)), then:
 *
 *   ptime = s * nsPerByte * (1 - f) / f
 *   maxidle = ptime * (1 - g^b) / g^b
 *   minidle = -ptime * (1 / (f - 1))
 *   offtime = ptime * (1 + 1/(1 - g) * (1 - g^(b - 1)) / g^(b - 1))
 *
 * Operationally, it's convenient to specify maxidle & offtime in units
 * independent of the link bandwidth so the maxidle & offtime passed to
 * this routine are the above values multiplied by 8*f/(1000*nsPerByte).
 * (The constant factor is a scale factor needed to make the parameters
 * integers.  This scaling also means that the 'unscaled' values of
 * maxidle*nsecPerByte/8 and offtime*nsecPerByte/8 will be in microseconds,
 * not nanoseconds.)  Also note that the 'idle' filter computation keeps
 * an estimate scaled upward by 2^RM_FILTER_GAIN so the passed value of
 * maxidle also must be scaled upward by this value.  Thus, the passed
 * values for maxidle and offtime can be computed as follows:
 *
 * maxidle = maxidle * 2^RM_FILTER_GAIN * 8 / (1000 * nsecPerByte)
 * offtime = offtime * 8 / (1000 * nsecPerByte)
 *
 * When USE_HRTIME is employed, then maxidle and offtime become:
 * 	maxidle = maxidle * (8.0 / nsecPerByte);
 * 	offtime = offtime * (8.0 / nsecPerByte);
 */
struct rm_class *
rmc_newclass(int pri, struct rm_ifdat *ifd, u_int nsecPerByte,
    void (*action)(rm_class_t *, rm_class_t *), int maxq,
    struct rm_class *parent, struct rm_class *borrow, u_int maxidle,
    int minidle, u_int offtime, int pktsize, int flags)
{
	struct rm_class	*cl;
	struct rm_class	*peer;
	int		 s;

	if (pri >= RM_MAXPRIO)
		return (NULL);
#ifndef ALTQ_RED
	if (flags & RMCF_RED) {
#ifdef ALTQ_DEBUG
		printf("rmc_newclass: RED not configured for CBQ!\n");
#endif
		return (NULL);
	}
#endif
#ifndef ALTQ_RIO
	if (flags & RMCF_RIO) {
#ifdef ALTQ_DEBUG
		printf("rmc_newclass: RIO not configured for CBQ!\n");
#endif
		return (NULL);
	}
#endif

	MALLOC(cl, struct rm_class *, sizeof(struct rm_class),
	       M_DEVBUF, M_WAITOK);
	if (cl == NULL)
		return (NULL);
	bzero(cl, sizeof(struct rm_class));
	CALLOUT_INIT(&cl->callout_);
	MALLOC(cl->q_, class_queue_t *, sizeof(class_queue_t),
	       M_DEVBUF, M_WAITOK);
	if (cl->q_ == NULL) {
		FREE(cl, M_DEVBUF);
		return (NULL);
	}
	bzero(cl->q_, sizeof(class_queue_t));

	/*
	 * Class initialization.
	 */
	cl->children_ = NULL;
	cl->parent_ = parent;
	cl->borrow_ = borrow;
	cl->leaf_ = 1;
	cl->ifdat_ = ifd;
	cl->pri_ = pri;
	cl->allotment_ = RM_NS_PER_SEC / nsecPerByte; /* Bytes per sec */
	cl->depth_ = 0;
	cl->qthresh_ = 0;
	cl->ns_per_byte_ = nsecPerByte;

	qlimit(cl->q_) = maxq;
	qtype(cl->q_) = Q_DROPHEAD;
	qlen(cl->q_) = 0;
	cl->flags_ = flags;

#if 1 /* minidle is also scaled in ALTQ */
	cl->minidle_ = (minidle * (int)nsecPerByte) / 8;
	if (cl->minidle_ > 0)
		cl->minidle_ = 0;
#else
	cl->minidle_ = minidle;
#endif
	cl->maxidle_ = (maxidle * nsecPerByte) / 8;
	if (cl->maxidle_ == 0)
		cl->maxidle_ = 1;
#if 1 /* offtime is also scaled in ALTQ */
	cl->avgidle_ = cl->maxidle_;
	cl->offtime_ = ((offtime * nsecPerByte) / 8) >> RM_FILTER_GAIN;
	if (cl->offtime_ == 0)
		cl->offtime_ = 1;
#else
	cl->avgidle_ = 0;
	cl->offtime_ = (offtime * nsecPerByte) / 8;
#endif
	cl->overlimit = action;

#ifdef ALTQ_RED
	if (flags & (RMCF_RED|RMCF_RIO)) {
		int red_flags, red_pkttime;

		red_flags = 0;
		if (flags & RMCF_ECN)
			red_flags |= REDF_ECN;
		if (flags & RMCF_FLOWVALVE)
			red_flags |= REDF_FLOWVALVE;
#ifdef ALTQ_RIO
		if (flags & RMCF_CLEARDSCP)
			red_flags |= RIOF_CLEARDSCP;
#endif
		red_pkttime = nsecPerByte * pktsize / 1000;

		if (flags & RMCF_RED) {
			cl->red_ = red_alloc(0, 0,
			    qlimit(cl->q_) * 10/100,
			    qlimit(cl->q_) * 30/100,
			    red_flags, red_pkttime);
			if (cl->red_ != NULL)
				qtype(cl->q_) = Q_RED;
		}
#ifdef ALTQ_RIO
		else {
			cl->red_ = (red_t *)rio_alloc(0, NULL,
						      red_flags, red_pkttime);
			if (cl->red_ != NULL)
				qtype(cl->q_) = Q_RIO;
		}
#endif
	}
#endif /* ALTQ_RED */

	/*
	 * put the class into the class tree
	 */
#ifdef __NetBSD__
	s = splnet();
#else
	s = splimp();
#endif
	IFQ_LOCK(ifd->ifq_);
	if ((peer = ifd->active_[pri]) != NULL) {
		/* find the last class at this pri */
		cl->peer_ = peer;
		while (peer->peer_ != ifd->active_[pri])
			peer = peer->peer_;
		peer->peer_ = cl;
	} else {
		ifd->active_[pri] = cl;
		cl->peer_ = cl;
	}

	if (cl->parent_) {
		cl->next_ = parent->children_;
		parent->children_ = cl;
		parent->leaf_ = 0;
	}

	/*
	 * Compute the depth of this class and its ancestors in the class
	 * hierarchy.
	 */
	rmc_depth_compute(cl);

	/*
	 * If CBQ's WRR is enabled, then initialize the class WRR state.
	 */
	if (ifd->wrr_) {
		ifd->num_[pri]++;
		ifd->alloc_[pri] += cl->allotment_;
		rmc_wrr_set_weights(ifd);
	}
	IFQ_UNLOCK(ifd->ifq_);
	splx(s);
	return (cl);
}

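/*
 * A minimal sketch (added for illustration, not part of the original
 * code) of how a userland configuration tool could derive the scaled
 * maxidle/offtime arguments for rmc_newclass() from the formulas
 * documented above it.  The helper name and its use of floating point
 * and libm's pow() are assumptions for illustration only, so the code
 * is kept under "notdef" like the other non-compiled fragments in
 * this file.
 */
#ifdef notdef
#include <math.h>

static void
cbq_compute_params(double s, double f, double b, double nsPerByte,
    u_int *maxidlep, u_int *offtimep)
{
	double g = 1.0 - 1.0 / (1 << RM_FILTER_GAIN);	/* filter gain */
	double nsecPerByte = nsPerByte / f;		/* effective rate */
	double ptime = s * nsPerByte * (1.0 - f) / f;	/* in nanoseconds */
	double gb = pow(g, b), gb1 = pow(g, b - 1.0);
	double maxidle = ptime * (1.0 - gb) / gb;
	double offtime = ptime * (1.0 + (1.0 / (1.0 - g)) * (1.0 - gb1) / gb1);

	/* apply the scale factors described in the comment above */
	*maxidlep = (u_int)(maxidle * (1 << RM_FILTER_GAIN) * 8.0 /
	    (1000.0 * nsecPerByte));
	*offtimep = (u_int)(offtime * 8.0 / (1000.0 * nsecPerByte));
}
#endif /* notdef */
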
int
rmc_modclass(struct rm_class *cl, u_int nsecPerByte, int maxq, u_int maxidle,
    int minidle, u_int offtime, int pktsize)
{
	struct rm_ifdat	*ifd;
	u_int		 old_allotment;
	int		 s;

	ifd = cl->ifdat_;
	old_allotment = cl->allotment_;

#ifdef __NetBSD__
	s = splnet();
#else
	s = splimp();
#endif
	IFQ_LOCK(ifd->ifq_);
	cl->allotment_ = RM_NS_PER_SEC / nsecPerByte; /* Bytes per sec */
	cl->qthresh_ = 0;
	cl->ns_per_byte_ = nsecPerByte;

	qlimit(cl->q_) = maxq;

#if 1 /* minidle is also scaled in ALTQ */
	cl->minidle_ = (minidle * nsecPerByte) / 8;
	if (cl->minidle_ > 0)
		cl->minidle_ = 0;
#else
	cl->minidle_ = minidle;
#endif
	cl->maxidle_ = (maxidle * nsecPerByte) / 8;
	if (cl->maxidle_ == 0)
		cl->maxidle_ = 1;
#if 1 /* offtime is also scaled in ALTQ */
	cl->avgidle_ = cl->maxidle_;
	cl->offtime_ = ((offtime * nsecPerByte) / 8) >> RM_FILTER_GAIN;
	if (cl->offtime_ == 0)
		cl->offtime_ = 1;
#else
	cl->avgidle_ = 0;
	cl->offtime_ = (offtime * nsecPerByte) / 8;
#endif

	/*
	 * If CBQ's WRR is enabled, then initialize the class WRR state.
	 */
	if (ifd->wrr_) {
		ifd->alloc_[cl->pri_] += cl->allotment_ - old_allotment;
		rmc_wrr_set_weights(ifd);
	}
	IFQ_UNLOCK(ifd->ifq_);
	splx(s);
	return (0);
}

/*
 * static void
 * rmc_wrr_set_weights(struct rm_ifdat *ifdat) - This function computes
 *	the appropriate round-robin weights for the CBQ weighted
 *	round-robin algorithm.
 *
 *	Returns: NONE
 */
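/*
 * Worked example (illustrative, not from the original source): with
 * two classes at one priority whose allotments are 500000 and 250000
 * bytes/sec and maxpkt_ = 1500, alloc_ = 750000 and num_ = 2, so
 * M_ = 750000 / (2 * 1500) = 250.  The weighted allotments then come
 * out as 500000/250 = 2000 and 250000/250 = 1000 bytes per round,
 * preserving the configured 2:1 bandwidth ratio.
 */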

static void
rmc_wrr_set_weights(struct rm_ifdat *ifd)
{
	int		i;
	struct rm_class	*cl, *clh;

	for (i = 0; i < RM_MAXPRIO; i++) {
		/*
		 * This is inverted from that of the simulator to
		 * maintain precision.
		 */
		if (ifd->num_[i] == 0)
			ifd->M_[i] = 0;
		else
			ifd->M_[i] = ifd->alloc_[i] /
				(ifd->num_[i] * ifd->maxpkt_);
		/*
		 * Compute the weighted allotment for each class.
		 * This takes the expensive div instruction out
		 * of the main loop for the wrr scheduling path.
		 * These only get recomputed when a class comes or
		 * goes.
		 */
		if (ifd->active_[i] != NULL) {
			clh = cl = ifd->active_[i];
			do {
				/* safe-guard for slow link or alloc_ == 0 */
				if (ifd->M_[i] == 0)
					cl->w_allotment_ = 0;
				else
					cl->w_allotment_ = cl->allotment_ /
						ifd->M_[i];
				cl = cl->peer_;
			} while ((cl != NULL) && (cl != clh));
		}
	}
}

int
rmc_get_weight(struct rm_ifdat *ifd, int pri)
{
	if ((pri >= 0) && (pri < RM_MAXPRIO))
		return (ifd->M_[pri]);
	else
		return (0);
}

/*
 * static void
 * rmc_depth_compute(struct rm_class *cl) - This function computes the
 *	appropriate depth of class 'cl' and its ancestors.
 *
 *	Returns:	NONE
 */

static void
rmc_depth_compute(struct rm_class *cl)
{
	rm_class_t	*t = cl, *p;

	/*
	 * Recompute the depth for the branch of the tree.
	 */
	while (t != NULL) {
		p = t->parent_;
		if (p && (t->depth_ >= p->depth_)) {
			p->depth_ = t->depth_ + 1;
			t = p;
		} else
			t = NULL;
	}
}

/*
 * static void
 * rmc_depth_recompute(struct rm_class *cl) - This function re-computes
 *	the depth of the tree after a class has been deleted.
 *
 *	Returns:	NONE
 */

static void
rmc_depth_recompute(rm_class_t *cl)
{
#if 1 /* ALTQ */
	rm_class_t	*p, *t;

	p = cl;
	while (p != NULL) {
		if ((t = p->children_) == NULL) {
			p->depth_ = 0;
		} else {
			int cdepth = 0;

			while (t != NULL) {
				if (t->depth_ > cdepth)
					cdepth = t->depth_;
				t = t->next_;
			}

			if (p->depth_ == cdepth + 1)
				/* no change to this parent */
				return;

			p->depth_ = cdepth + 1;
		}

		p = p->parent_;
	}
#else
	rm_class_t	*t;

	if (cl->depth_ >= 1) {
		if (cl->children_ == NULL) {
			cl->depth_ = 0;
		} else if ((t = cl->children_) != NULL) {
			while (t != NULL) {
				if (t->children_ != NULL)
					rmc_depth_recompute(t);
				t = t->next_;
			}
		} else
			rmc_depth_compute(cl);
	}
#endif
}

/*
 * void
 * rmc_delete_class(struct rm_ifdat *ifdat, struct rm_class *cl) - This
 *	function deletes a class from the link-sharing structure and frees
 *	all resources associated with the class.
 *
 *	Returns: NONE
 */

void
rmc_delete_class(struct rm_ifdat *ifd, struct rm_class *cl)
{
	struct rm_class	*p, *head, *previous;
	int		 s;

	ASSERT(cl->children_ == NULL);

	if (cl->sleeping_)
		CALLOUT_STOP(&cl->callout_);

#ifdef __NetBSD__
	s = splnet();
#else
	s = splimp();
#endif
	IFQ_LOCK(ifd->ifq_);
	/*
	 * Free packets in the packet queue.
	 * XXX - this may not be a desired behavior.  Packets should be
	 *		re-queued.
	 */
	rmc_dropall(cl);

	/*
	 * If the class has a parent, then remove the class from the
	 * parent's children chain.
	 */
	if (cl->parent_ != NULL) {
		head = cl->parent_->children_;
		p = previous = head;
		if (head->next_ == NULL) {
			ASSERT(head == cl);
			cl->parent_->children_ = NULL;
			cl->parent_->leaf_ = 1;
		} else while (p != NULL) {
			if (p == cl) {
				if (cl == head)
					cl->parent_->children_ = cl->next_;
				else
					previous->next_ = cl->next_;
				cl->next_ = NULL;
				p = NULL;
			} else {
				previous = p;
				p = p->next_;
			}
		}
	}

	/*
	 * Delete class from class priority peer list.
	 */
	if ((p = ifd->active_[cl->pri_]) != NULL) {
		/*
		 * If there is more than one member of this priority
		 * level, then look for class(cl) in the priority level.
		 */
		if (p != p->peer_) {
			while (p->peer_ != cl)
				p = p->peer_;
			p->peer_ = cl->peer_;

			if (ifd->active_[cl->pri_] == cl)
				ifd->active_[cl->pri_] = cl->peer_;
		} else {
			ASSERT(p == cl);
			ifd->active_[cl->pri_] = NULL;
		}
	}

	/*
	 * Recompute the WRR weights.
	 */
	if (ifd->wrr_) {
		ifd->alloc_[cl->pri_] -= cl->allotment_;
		ifd->num_[cl->pri_]--;
		rmc_wrr_set_weights(ifd);
	}

	/*
	 * Re-compute the depth of the tree.
	 */
#if 1 /* ALTQ */
	rmc_depth_recompute(cl->parent_);
#else
	rmc_depth_recompute(ifd->root_);
#endif

	IFQ_UNLOCK(ifd->ifq_);
	splx(s);

	/*
	 * Free the class structure.
	 */
	if (cl->red_ != NULL) {
#ifdef ALTQ_RIO
		if (q_is_rio(cl->q_))
			rio_destroy((rio_t *)cl->red_);
#endif
#ifdef ALTQ_RED
		if (q_is_red(cl->q_))
			red_destroy(cl->red_);
#endif
	}
	FREE(cl->q_, M_DEVBUF);
	FREE(cl, M_DEVBUF);
}


/*
 * void
 * rmc_init(...) - Initialize the resource management data structures
 *	associated with the output portion of interface 'ifp'.  'ifd' is
 *	where the structures will be built (for backwards compatibility, the
 *	structures aren't kept in the ifnet struct).  'nsecPerByte'
 *	gives the link speed (inverse of bandwidth) in nanoseconds/byte.
 *	'restart' is the driver-specific routine that the generic 'delay
 *	until under limit' action will call to restart output.  `maxq'
 *	is the queue size of the 'link' & 'default' classes.  'maxqueued'
 *	is the maximum number of packets that the resource management
 *	code will allow to be queued 'downstream' (this is typically 1).
 *
 *	Returns:	NONE
 */

void
rmc_init(struct ifaltq *ifq, struct rm_ifdat *ifd, u_int nsecPerByte,
    void (*restart)(struct ifaltq *), int maxq, int maxqueued, u_int maxidle,
    int minidle, u_int offtime, int flags)
{
	int		i, mtu;

	/*
	 * Initialize the CBQ tracing/debug facility.
	 */
	CBQTRACEINIT();

	bzero((char *)ifd, sizeof (*ifd));
	mtu = ifq->altq_ifp->if_mtu;
	ifd->ifq_ = ifq;
	ifd->restart = restart;
	ifd->maxqueued_ = maxqueued;
	ifd->ns_per_byte_ = nsecPerByte;
	ifd->maxpkt_ = mtu;
	ifd->wrr_ = (flags & RMCF_WRR) ? 1 : 0;
	ifd->efficient_ = (flags & RMCF_EFFICIENT) ? 1 : 0;
#if 1
	ifd->maxiftime_ = mtu * nsecPerByte / 1000 * 16;
	if (mtu * nsecPerByte > 10 * 1000000)
		ifd->maxiftime_ /= 4;
#endif

	reset_cutoff(ifd);
	CBQTRACE(rmc_init, 'INIT', ifd->cutoff_);

	/*
	 * Initialize the CBQ's WRR state.
	 */
	for (i = 0; i < RM_MAXPRIO; i++) {
		ifd->alloc_[i] = 0;
		ifd->M_[i] = 0;
		ifd->num_[i] = 0;
		ifd->na_[i] = 0;
		ifd->active_[i] = NULL;
	}

	/*
	 * Initialize current packet state.
	 */
	ifd->qi_ = 0;
	ifd->qo_ = 0;
	for (i = 0; i < RM_MAXQUEUED; i++) {
		ifd->class_[i] = NULL;
		ifd->curlen_[i] = 0;
		ifd->borrowed_[i] = NULL;
	}

	/*
	 * Create the root class of the link-sharing structure.
	 */
	if ((ifd->root_ = rmc_newclass(0, ifd,
				       nsecPerByte,
				       rmc_root_overlimit, maxq, 0, 0,
				       maxidle, minidle, offtime,
				       0, 0)) == NULL) {
		printf("rmc_init: root class not allocated\n");
		return;
	}
	ifd->root_->depth_ = 0;
}

/*
 * void
 * rmc_queue_packet(struct rm_class *cl, mbuf_t *m) - Add packet given by
 *	mbuf 'm' to queue for resource class 'cl'.  This routine is called
 *	by a driver's if_output routine.  This routine must be called with
 *	output packet completion interrupts locked out (to avoid racing with
 *	rmc_dequeue_next).
 *
 *	Returns:	0 on successful queueing
 *			-1 when packet drop occurs
 */
int
rmc_queue_packet(struct rm_class *cl, mbuf_t *m)
{
	struct timeval	 now;
	struct rm_ifdat *ifd = cl->ifdat_;
	int		 cpri = cl->pri_;
	int		 is_empty = qempty(cl->q_);

	RM_GETTIME(now);
	if (ifd->cutoff_ > 0) {
		if (TV_LT(&cl->undertime_, &now)) {
			if (ifd->cutoff_ > cl->depth_)
				ifd->cutoff_ = cl->depth_;
			CBQTRACE(rmc_queue_packet, 'ffoc', cl->depth_);
		}
#if 1 /* ALTQ */
		else {
			/*
			 * the class is overlimit. if the class has
			 * underlimit ancestors, set cutoff to the lowest
			 * depth among them.
			 */
			struct rm_class *borrow = cl->borrow_;

			while (borrow != NULL &&
			       borrow->depth_ < ifd->cutoff_) {
				if (TV_LT(&borrow->undertime_, &now)) {
					ifd->cutoff_ = borrow->depth_;
					CBQTRACE(rmc_queue_packet, 'ffob', ifd->cutoff_);
					break;
				}
				borrow = borrow->borrow_;
			}
		}
#else /* !ALTQ */
		else if ((ifd->cutoff_ > 1) && cl->borrow_) {
			if (TV_LT(&cl->borrow_->undertime_, &now)) {
				ifd->cutoff_ = cl->borrow_->depth_;
				CBQTRACE(rmc_queue_packet, 'ffob',
					 cl->borrow_->depth_);
			}
		}
#endif /* !ALTQ */
	}

	if (_rmc_addq(cl, m) < 0)
		/* failed */
		return (-1);

	if (is_empty) {
		CBQTRACE(rmc_queue_packet, 'ytpe', cl->stats_.handle);
		ifd->na_[cpri]++;
	}

	if (qlen(cl->q_) > qlimit(cl->q_)) {
		/* note: qlimit can be set to 0 or 1 */
		rmc_drop_action(cl);
		return (-1);
	}
	return (0);
}

/*
 * void
 * rmc_tl_satisfied(struct rm_ifdat *ifd, struct timeval *now) - Check all
 *	classes to see if they are satisfied.
 */

static void
rmc_tl_satisfied(struct rm_ifdat *ifd, struct timeval *now)
{
	int		 i;
	rm_class_t	*p, *bp;

	for (i = RM_MAXPRIO - 1; i >= 0; i--) {
		if ((bp = ifd->active_[i]) != NULL) {
			p = bp;
			do {
				if (!rmc_satisfied(p, now)) {
					ifd->cutoff_ = p->depth_;
					return;
				}
				p = p->peer_;
			} while (p != bp);
		}
	}

	reset_cutoff(ifd);
}

/*
 * rmc_satisfied - Return 1 if the class is satisfied, 0 otherwise.
 */

static int
rmc_satisfied(struct rm_class *cl, struct timeval *now)
{
	rm_class_t	*p;

	if (cl == NULL)
		return (1);
	if (TV_LT(now, &cl->undertime_))
		return (1);
	if (cl->depth_ == 0) {
		if (!cl->sleeping_ && (qlen(cl->q_) > cl->qthresh_))
			return (0);
		else
			return (1);
	}
	if (cl->children_ != NULL) {
		p = cl->children_;
		while (p != NULL) {
			if (!rmc_satisfied(p, now))
				return (0);
			p = p->next_;
		}
	}

	return (1);
}

/*
 * Return 1 if class 'cl' is under limit or can borrow from a parent,
 * 0 if overlimit.  As a side effect, this routine will invoke the
 * class overlimit action if the class is overlimit.
 */

static int
rmc_under_limit(struct rm_class *cl, struct timeval *now)
{
	rm_class_t	*p = cl;
	rm_class_t	*top;
	struct rm_ifdat	*ifd = cl->ifdat_;

	ifd->borrowed_[ifd->qi_] = NULL;
	/*
	 * If cl is the root class, then always return that it is
	 * underlimit.  Otherwise, check to see if the class is underlimit.
	 */
	if (cl->parent_ == NULL)
		return (1);

	if (cl->sleeping_) {
		if (TV_LT(now, &cl->undertime_))
			return (0);

		CALLOUT_STOP(&cl->callout_);
		cl->sleeping_ = 0;
		cl->undertime_.tv_sec = 0;
		return (1);
	}

	top = NULL;
	while (cl->undertime_.tv_sec && TV_LT(now, &cl->undertime_)) {
		if (((cl = cl->borrow_) == NULL) ||
		    (cl->depth_ > ifd->cutoff_)) {
#ifdef ADJUST_CUTOFF
			if (cl != NULL)
				/* cutoff is taking effect, just
				   return false without calling
				   the delay action. */
				return (0);
#endif
#ifdef BORROW_OFFTIME
			/*
			 * check if the class can borrow offtime too.
			 * borrow offtime from the top of the borrow
			 * chain if the top class is not overloaded.
			 */
			if (cl != NULL) {
				/* cutoff is taking effect, use this class as top. */
				top = cl;
				CBQTRACE(rmc_under_limit, 'ffou', ifd->cutoff_);
			}
			if (top != NULL && top->avgidle_ == top->minidle_)
				top = NULL;
			p->overtime_ = *now;
			(p->overlimit)(p, top);
#else
			p->overtime_ = *now;
			(p->overlimit)(p, NULL);
#endif
			return (0);
		}
		top = cl;
	}

	if (cl != p)
		ifd->borrowed_[ifd->qi_] = cl;
	return (1);
}

/*
 * _rmc_wrr_dequeue_next() - This is the scheduler for WRR as opposed to
 *	packet-by-packet round robin.
 *
 * The heart of the weighted round-robin scheduler, which decides which
 * class next gets to send a packet.  Highest priority first, then
 * weighted round-robin within priorities.
 *
 * Each able-to-send class gets to send until its byte allocation is
 * exhausted.  Thus, the active pointer is only changed after a class has
 * exhausted its allocation.
 *
 * If the scheduler finds no class that is underlimit or able to borrow,
 * then the first class found that had a nonzero queue and is allowed to
 * borrow gets to send.
 */

static mbuf_t *
_rmc_wrr_dequeue_next(struct rm_ifdat *ifd, int op)
{
	struct rm_class	*cl = NULL, *first = NULL;
	u_int		 deficit;
	int		 cpri;
	mbuf_t		*m;
	struct timeval	 now;

	RM_GETTIME(now);

	/*
	 * if the driver polls the top of the queue and then removes
	 * the polled packet, we must return the same packet.
	 */
	if (op == ALTDQ_REMOVE && ifd->pollcache_) {
		cl = ifd->pollcache_;
		cpri = cl->pri_;
		if (ifd->efficient_) {
			/* check if this class is overlimit */
			if (cl->undertime_.tv_sec != 0 &&
			    rmc_under_limit(cl, &now) == 0)
				first = cl;
		}
		ifd->pollcache_ = NULL;
		goto _wrr_out;
	}
	else {
		/* mode == ALTDQ_POLL || pollcache == NULL */
		ifd->pollcache_ = NULL;
		ifd->borrowed_[ifd->qi_] = NULL;
	}
#ifdef ADJUST_CUTOFF
 _again:
#endif
	for (cpri = RM_MAXPRIO - 1; cpri >= 0; cpri--) {
		if (ifd->na_[cpri] == 0)
			continue;
		deficit = 0;
		/*
		 * Loop through twice for a priority level, if some class
		 * was unable to send a packet the first round because
		 * of the weighted round-robin mechanism.
		 * During the second loop at this level, deficit==2.
		 * (This second loop is not needed if for every class,
		 * "M[cl->pri_]" times "cl->allotment" is greater than
		 * the byte size for the largest packet in the class.)
		 */
 _wrr_loop:
		cl = ifd->active_[cpri];
		ASSERT(cl != NULL);
		do {
			if ((deficit < 2) && (cl->bytes_alloc_ <= 0))
				cl->bytes_alloc_ += cl->w_allotment_;
			if (!qempty(cl->q_)) {
				if ((cl->undertime_.tv_sec == 0) ||
				    rmc_under_limit(cl, &now)) {
					if (cl->bytes_alloc_ > 0 || deficit > 1)
						goto _wrr_out;

					/* underlimit but no alloc */
					deficit = 1;
#if 1
					ifd->borrowed_[ifd->qi_] = NULL;
#endif
				}
				else if (first == NULL && cl->borrow_ != NULL)
					first = cl; /* borrowing candidate */
			}

			cl->bytes_alloc_ = 0;
			cl = cl->peer_;
		} while (cl != ifd->active_[cpri]);

		if (deficit == 1) {
			/* first loop found an underlimit class with deficit */
			/* Loop on same priority level, with new deficit.  */
			deficit = 2;
			goto _wrr_loop;
		}
	}

#ifdef ADJUST_CUTOFF
	/*
	 * no underlimit class found.  if cutoff is taking effect,
	 * increase cutoff and try again.
	 */
	if (first != NULL && ifd->cutoff_ < ifd->root_->depth_) {
		ifd->cutoff_++;
		CBQTRACE(_rmc_wrr_dequeue_next, 'ojda', ifd->cutoff_);
		goto _again;
	}
#endif /* ADJUST_CUTOFF */
	/*
	 * If LINK_EFFICIENCY is turned on, then the first overlimit
	 * class we encounter will send a packet if all the classes
	 * of the link-sharing structure are overlimit.
	 */
	reset_cutoff(ifd);
	CBQTRACE(_rmc_wrr_dequeue_next, 'otsr', ifd->cutoff_);

	if (!ifd->efficient_ || first == NULL)
		return (NULL);

	cl = first;
	cpri = cl->pri_;
#if 0	/* too time-consuming for nothing */
	if (cl->sleeping_)
		CALLOUT_STOP(&cl->callout_);
	cl->sleeping_ = 0;
	cl->undertime_.tv_sec = 0;
#endif
	ifd->borrowed_[ifd->qi_] = cl->borrow_;
	ifd->cutoff_ = cl->borrow_->depth_;

	/*
	 * Dequeue the packet and do the bookkeeping...
	 */
 _wrr_out:
	if (op == ALTDQ_REMOVE) {
		m = _rmc_getq(cl);
		if (m == NULL)
			panic("_rmc_wrr_dequeue_next");
		if (qempty(cl->q_))
			ifd->na_[cpri]--;

		/*
		 * Update class statistics and link data.
		 */
		if (cl->bytes_alloc_ > 0)
			cl->bytes_alloc_ -= m_pktlen(m);

		if ((cl->bytes_alloc_ <= 0) || first == cl)
			ifd->active_[cl->pri_] = cl->peer_;
		else
			ifd->active_[cl->pri_] = cl;

		ifd->class_[ifd->qi_] = cl;
		ifd->curlen_[ifd->qi_] = m_pktlen(m);
		ifd->now_[ifd->qi_] = now;
		ifd->qi_ = (ifd->qi_ + 1) % ifd->maxqueued_;
		ifd->queued_++;
	} else {
		/* mode == ALTDQ_PPOLL */
		m = _rmc_pollq(cl);
		ifd->pollcache_ = cl;
	}
	return (m);
}

/*
 * Dequeue & return next packet from the highest priority class that
 * has a packet to send & has enough allocation to send it.  This
 * routine is called by a driver whenever it needs a new packet to
 * output.
 */
static mbuf_t *
_rmc_prr_dequeue_next(struct rm_ifdat *ifd, int op)
{
	mbuf_t		*m;
	int		 cpri;
	struct rm_class	*cl, *first = NULL;
	struct timeval	 now;

	RM_GETTIME(now);

	/*
	 * if the driver polls the top of the queue and then removes
	 * the polled packet, we must return the same packet.
	 */
	if (op == ALTDQ_REMOVE && ifd->pollcache_) {
		cl = ifd->pollcache_;
		cpri = cl->pri_;
		ifd->pollcache_ = NULL;
		goto _prr_out;
	} else {
		/* mode == ALTDQ_POLL || pollcache == NULL */
		ifd->pollcache_ = NULL;
		ifd->borrowed_[ifd->qi_] = NULL;
	}
#ifdef ADJUST_CUTOFF
 _again:
#endif
	for (cpri = RM_MAXPRIO - 1; cpri >= 0; cpri--) {
		if (ifd->na_[cpri] == 0)
			continue;
		cl = ifd->active_[cpri];
		ASSERT(cl != NULL);
		do {
			if (!qempty(cl->q_)) {
				if ((cl->undertime_.tv_sec == 0) ||
				    rmc_under_limit(cl, &now))
					goto _prr_out;
				if (first == NULL && cl->borrow_ != NULL)
					first = cl;
			}
			cl = cl->peer_;
		} while (cl != ifd->active_[cpri]);
	}

#ifdef ADJUST_CUTOFF
	/*
	 * no underlimit class found.  if cutoff is taking effect, increase
	 * cutoff and try again.
	 */
	if (first != NULL && ifd->cutoff_ < ifd->root_->depth_) {
		ifd->cutoff_++;
		goto _again;
	}
#endif /* ADJUST_CUTOFF */
	/*
	 * If LINK_EFFICIENCY is turned on, then the first overlimit
	 * class we encounter will send a packet if all the classes
	 * of the link-sharing structure are overlimit.
	 */
	reset_cutoff(ifd);
	if (!ifd->efficient_ || first == NULL)
		return (NULL);

	cl = first;
	cpri = cl->pri_;
#if 0	/* too time-consuming for nothing */
	if (cl->sleeping_)
		CALLOUT_STOP(&cl->callout_);
	cl->sleeping_ = 0;
	cl->undertime_.tv_sec = 0;
#endif
	ifd->borrowed_[ifd->qi_] = cl->borrow_;
	ifd->cutoff_ = cl->borrow_->depth_;

	/*
	 * Dequeue the packet and do the bookkeeping...
	 */
 _prr_out:
	if (op == ALTDQ_REMOVE) {
		m = _rmc_getq(cl);
		if (m == NULL)
			panic("_rmc_prr_dequeue_next");
		if (qempty(cl->q_))
			ifd->na_[cpri]--;

		ifd->active_[cpri] = cl->peer_;

		ifd->class_[ifd->qi_] = cl;
		ifd->curlen_[ifd->qi_] = m_pktlen(m);
		ifd->now_[ifd->qi_] = now;
		ifd->qi_ = (ifd->qi_ + 1) % ifd->maxqueued_;
		ifd->queued_++;
	} else {
		/* mode == ALTDQ_POLL */
		m = _rmc_pollq(cl);
		ifd->pollcache_ = cl;
	}
	return (m);
}

/*
 * mbuf_t *
 * rmc_dequeue_next(struct rm_ifdat *ifd, struct timeval *now) - this function
 *	is invoked by the packet driver to get the next packet to be
 *	dequeued and output on the link.  If WRR is enabled, then the
 *	WRR dequeue next routine will determine the next packet to be sent.
 *	Otherwise, packet-by-packet round robin is invoked.
 *
 *	Returns:	NULL, if a packet is not available or if all
 *			classes are overlimit.
 *
 *			Otherwise, a pointer to the next packet.
 */

mbuf_t *
rmc_dequeue_next(struct rm_ifdat *ifd, int mode)
{
	if (ifd->queued_ >= ifd->maxqueued_)
		return (NULL);
	else if (ifd->wrr_)
		return (_rmc_wrr_dequeue_next(ifd, mode));
	else
		return (_rmc_prr_dequeue_next(ifd, mode));
}

/*
 * Update the utilization estimate for the packet that just completed.
 * The packet's class & the parent(s) of that class all get their
 * estimators updated.  This routine is called by the driver's output-
 * packet-completion interrupt service routine.
 */
/*
 * a macro to approximate "divide by 1000"; it actually multiplies
 * by about 0.000999 when the value has enough effective digits.
 * (on pentium, mul takes 9 cycles but div takes 46!)
 */
#define	NSEC_TO_USEC(t)	(((t) >> 10) + ((t) >> 16) + ((t) >> 17))
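/*
 * Worked example (added for illustration): 1/1024 + 1/65536 + 1/131072
 * is about 0.00099945, so for t = 1000000 ns the macro yields
 * 976 + 15 + 7 = 998 us against an exact 1000 us, i.e. an error of
 * roughly 0.1% plus truncation from the shifts.
 */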
void
rmc_update_class_util(struct rm_ifdat *ifd)
{
	int		 idle, avgidle, pktlen;
	int		 pkt_time, tidle;
	rm_class_t	*cl, *borrowed;
	rm_class_t	*borrows;
	struct timeval	*nowp;

	/*
	 * Get the most recent completed class.
	 */
	if ((cl = ifd->class_[ifd->qo_]) == NULL)
		return;

	pktlen = ifd->curlen_[ifd->qo_];
	borrowed = ifd->borrowed_[ifd->qo_];
	borrows = borrowed;

	PKTCNTR_ADD(&cl->stats_.xmit_cnt, pktlen);

	/*
	 * Run estimator on class and its ancestors.
	 */
	/*
	 * rm_update_class_util is designed to be called when the
	 * transfer is completed from a xmit complete interrupt,
	 * but most drivers don't implement an upcall for that.
	 * so, just use estimated completion time.
	 * as a result, ifd->qi_ and ifd->qo_ are always synced.
	 */
	nowp = &ifd->now_[ifd->qo_];
	/* get pkt_time (for link) in usec */
#if 1  /* use approximation */
	pkt_time = ifd->curlen_[ifd->qo_] * ifd->ns_per_byte_;
	pkt_time = NSEC_TO_USEC(pkt_time);
#else
	pkt_time = ifd->curlen_[ifd->qo_] * ifd->ns_per_byte_ / 1000;
#endif
#if 1 /* ALTQ4PPP */
	if (TV_LT(nowp, &ifd->ifnow_)) {
		int iftime;

		/*
		 * make sure the estimated completion time does not go
		 * too far.  it can happen when the link layer supports
		 * data compression or the interface speed is set to
		 * a much lower value.
		 */
		TV_DELTA(&ifd->ifnow_, nowp, iftime);
		if (iftime+pkt_time < ifd->maxiftime_) {
			TV_ADD_DELTA(&ifd->ifnow_, pkt_time, &ifd->ifnow_);
		} else {
			TV_ADD_DELTA(nowp, ifd->maxiftime_, &ifd->ifnow_);
		}
	} else {
		TV_ADD_DELTA(nowp, pkt_time, &ifd->ifnow_);
	}
#else
	if (TV_LT(nowp, &ifd->ifnow_)) {
		TV_ADD_DELTA(&ifd->ifnow_, pkt_time, &ifd->ifnow_);
	} else {
		TV_ADD_DELTA(nowp, pkt_time, &ifd->ifnow_);
	}
#endif

	while (cl != NULL) {
		TV_DELTA(&ifd->ifnow_, &cl->last_, idle);
		if (idle >= 2000000)
			/*
			 * this class is idle enough, reset avgidle.
			 * (TV_DELTA returns 2000000 us when delta is large.)
			 */
			cl->avgidle_ = cl->maxidle_;

		/* get pkt_time (for class) in usec */
#if 1  /* use approximation */
		pkt_time = pktlen * cl->ns_per_byte_;
		pkt_time = NSEC_TO_USEC(pkt_time);
#else
		pkt_time = pktlen * cl->ns_per_byte_ / 1000;
#endif
		idle -= pkt_time;

		avgidle = cl->avgidle_;
		avgidle += idle - (avgidle >> RM_FILTER_GAIN);
		cl->avgidle_ = avgidle;

		/* Are we overlimit ? */
		if (avgidle <= 0) {
			CBQTRACE(rmc_update_class_util, 'milo', cl->stats_.handle);
#if 1 /* ALTQ */
			/*
			 * need some lower bound for avgidle, otherwise
			 * a borrowing class gets unbounded penalty.
			 */
			if (avgidle < cl->minidle_)
				avgidle = cl->avgidle_ = cl->minidle_;
#endif
			/* set next idle to make avgidle 0 */
			tidle = pkt_time +
				(((1 - RM_POWER) * avgidle) >> RM_FILTER_GAIN);
			TV_ADD_DELTA(nowp, tidle, &cl->undertime_);
			++cl->stats_.over;
		} else {
			cl->avgidle_ =
			    (avgidle > cl->maxidle_) ? cl->maxidle_ : avgidle;
			cl->undertime_.tv_sec = 0;
			if (cl->sleeping_) {
				CALLOUT_STOP(&cl->callout_);
				cl->sleeping_ = 0;
			}
		}

		if (borrows != NULL) {
			if (borrows != cl)
				++cl->stats_.borrows;
			else
				borrows = NULL;
		}
		cl->last_ = ifd->ifnow_;
		cl->last_pkttime_ = pkt_time;

#if 1
		if (cl->parent_ == NULL) {
			/* take stats of root class */
			PKTCNTR_ADD(&cl->stats_.xmit_cnt, pktlen);
		}
#endif

		cl = cl->parent_;
	}

	/*
	 * Check to see if cutoff needs to be set to a new level.
	 */
	cl = ifd->class_[ifd->qo_];
	if (borrowed && (ifd->cutoff_ >= borrowed->depth_)) {
#if 1 /* ALTQ */
		if ((qlen(cl->q_) <= 0) || TV_LT(nowp, &borrowed->undertime_)) {
			rmc_tl_satisfied(ifd, nowp);
			CBQTRACE(rmc_update_class_util, 'broe', ifd->cutoff_);
		} else {
			ifd->cutoff_ = borrowed->depth_;
			CBQTRACE(rmc_update_class_util, 'ffob', borrowed->depth_);
		}
#else /* !ALTQ */
		if ((qlen(cl->q_) <= 1) || TV_LT(&now, &borrowed->undertime_)) {
			reset_cutoff(ifd);
#ifdef notdef
			rmc_tl_satisfied(ifd, &now);
#endif
			CBQTRACE(rmc_update_class_util, 'broe', ifd->cutoff_);
		} else {
			ifd->cutoff_ = borrowed->depth_;
			CBQTRACE(rmc_update_class_util, 'ffob', borrowed->depth_);
		}
#endif /* !ALTQ */
	}

	/*
	 * Release class slot
	 */
	ifd->borrowed_[ifd->qo_] = NULL;
	ifd->class_[ifd->qo_] = NULL;
	ifd->qo_ = (ifd->qo_ + 1) % ifd->maxqueued_;
	ifd->queued_--;
}

/*
 * void
 * rmc_drop_action(struct rm_class *cl) - Generic (not protocol-specific)
 *	over-limit action routines.  These get invoked by rmc_under_limit()
 *	if a class with packets to send is over its bandwidth limit & can't
 *	borrow from a parent class.
 *
 *	Returns: NONE
 */

static void
rmc_drop_action(struct rm_class *cl)
{
	struct rm_ifdat	*ifd = cl->ifdat_;

	ASSERT(qlen(cl->q_) > 0);
	_rmc_dropq(cl);
	if (qempty(cl->q_))
		ifd->na_[cl->pri_]--;
}

void rmc_dropall(struct rm_class *cl)
{
	struct rm_ifdat	*ifd = cl->ifdat_;

	if (!qempty(cl->q_)) {
		_flushq(cl->q_);

		ifd->na_[cl->pri_]--;
	}
}

#if (__FreeBSD_version > 300000)
/* hzto() is removed from FreeBSD-3.0 */
static int hzto(struct timeval *);

static int
hzto(tv)
	struct timeval *tv;
{
	struct timeval t2;

	getmicrotime(&t2);
	t2.tv_sec = tv->tv_sec - t2.tv_sec;
	t2.tv_usec = tv->tv_usec - t2.tv_usec;
	return (tvtohz(&t2));
}
#endif /* __FreeBSD_version > 300000 */

/*
 * void
 * rmc_delay_action(struct rm_class *cl) - This function is the generic CBQ
 *	delay action routine.  It is invoked via rmc_under_limit when the
 *	packet is discovered to be overlimit.
 *
 *	If the delay action is the result of the borrow class being
 *	overlimit, then delay for the offtime of the borrowing class that
 *	is overlimit.
 *
 *	Returns: NONE
 */

void
rmc_delay_action(struct rm_class *cl, struct rm_class *borrow)
{
	int	delay, t, extradelay;

	cl->stats_.overactions++;
	TV_DELTA(&cl->undertime_, &cl->overtime_, delay);
#ifndef BORROW_OFFTIME
	delay += cl->offtime_;
#endif

	if (!cl->sleeping_) {
		CBQTRACE(rmc_delay_action, 'yled', cl->stats_.handle);
#ifdef BORROW_OFFTIME
		if (borrow != NULL)
			extradelay = borrow->offtime_;
		else
#endif
			extradelay = cl->offtime_;

#ifdef ALTQ
		/*
		 * XXX recalculate suspend time:
		 * current undertime is (tidle + pkt_time) calculated
		 * from the last transmission.
		 *	tidle: time required to bring avgidle back to 0
		 *	pkt_time: target waiting time for this class
		 * we need to replace pkt_time by offtime
		 */
		extradelay -= cl->last_pkttime_;
#endif
		if (extradelay > 0) {
			TV_ADD_DELTA(&cl->undertime_, extradelay, &cl->undertime_);
			delay += extradelay;
		}

		cl->sleeping_ = 1;
		cl->stats_.delays++;

		/*
		 * Since packets are phased randomly with respect to the
		 * clock, 1 tick (the next clock tick) can be an arbitrarily
		 * short time so we have to wait for at least two ticks.
		 * NOTE:  If there's no other traffic, we need the timer as
		 * a 'backstop' to restart this class.
		 */
		if (delay > tick * 2) {
#ifdef __FreeBSD__
			/* FreeBSD rounds up the tick */
			t = hzto(&cl->undertime_);
#else
			/* other BSDs round down the tick */
			t = hzto(&cl->undertime_) + 1;
#endif
		} else
			t = 2;
		CALLOUT_RESET(&cl->callout_, t,
			      (timeout_t *)rmc_restart, (caddr_t)cl);
	}
}

/*
 * void
 * rmc_restart() - is just a helper routine for rmc_delay_action -- it is
 *	called by the system timer code & is responsible for checking if the
 *	class is still sleeping (it might have been restarted as a side
 *	effect of the queue scan on a packet arrival) and, if so, restarting
 *	output for the class.  Inspecting the class state & restarting output
 *	require locking the class structure.  In general the driver is
 *	responsible for locking but this is the only routine that is not
 *	called directly or indirectly from the interface driver so it has to
 *	know about system locking conventions.  Under bsd, locking is done
 *	by raising IPL to splimp so that's what's implemented here.  On a
 *	different system this would probably need to be changed.
 *
 *	Returns:	NONE
 */

static void
rmc_restart(struct rm_class *cl)
{
	struct rm_ifdat	*ifd = cl->ifdat_;
	int		 s;

#ifdef __NetBSD__
	s = splnet();
#else
	s = splimp();
#endif
	IFQ_LOCK(ifd->ifq_);
	if (cl->sleeping_) {
		cl->sleeping_ = 0;
		cl->undertime_.tv_sec = 0;

		if (ifd->queued_ < ifd->maxqueued_ && ifd->restart != NULL) {
			CBQTRACE(rmc_restart, 'trts', cl->stats_.handle);
			(ifd->restart)(ifd->ifq_);
		}
	}
	IFQ_UNLOCK(ifd->ifq_);
	splx(s);
}

/*
 * void
 * rmc_root_overlimit(struct rm_class *cl) - This is the generic overlimit
 *	handling routine for the root class of the link-sharing structure.
 *
 *	Returns: NONE
 */

static void
rmc_root_overlimit(struct rm_class *cl, struct rm_class *borrow)
{
	panic("rmc_root_overlimit");
}

/*
 * Packet Queue handling routines.  Eventually, these are intended to
 *	localize the effects on the rest of the code of whether queues
 *	are RED queues or droptail queues.
 */

static int
_rmc_addq(rm_class_t *cl, mbuf_t *m)
{
#ifdef ALTQ_RIO
	if (q_is_rio(cl->q_))
		return rio_addq((rio_t *)cl->red_, cl->q_, m, cl->pktattr_);
#endif
#ifdef ALTQ_RED
	if (q_is_red(cl->q_))
		return red_addq(cl->red_, cl->q_, m, cl->pktattr_);
#endif /* ALTQ_RED */

	if (cl->flags_ & RMCF_CLEARDSCP)
		write_dsfield(m, cl->pktattr_, 0);

	_addq(cl->q_, m);
	return (0);
}

/* note: _rmc_dropq is not called for red */
static void
_rmc_dropq(rm_class_t *cl)
{
	mbuf_t	*m;

	if ((m = _getq(cl->q_)) != NULL)
		m_freem(m);
}

static mbuf_t *
_rmc_getq(rm_class_t *cl)
{
#ifdef ALTQ_RIO
	if (q_is_rio(cl->q_))
		return rio_getq((rio_t *)cl->red_, cl->q_);
#endif
#ifdef ALTQ_RED
	if (q_is_red(cl->q_))
		return red_getq(cl->red_, cl->q_);
#endif
	return _getq(cl->q_);
}

static mbuf_t *
_rmc_pollq(rm_class_t *cl)
{
	return qhead(cl->q_);
}

#ifdef CBQ_TRACE

struct cbqtrace		 cbqtrace_buffer[NCBQTRACE+1];
struct cbqtrace		*cbqtrace_ptr = NULL;
int			 cbqtrace_count;

/*
 * DDB hook to trace cbq events:
 *  the last 1024 events are held in a circular buffer.
 *  use "call cbqtrace_dump(N)" to display 20 events starting from the
 *  Nth event.
 */
void cbqtrace_dump(int);
static char *rmc_funcname(void *);

static struct rmc_funcs {
	void	*func;
	char	*name;
} rmc_funcs[] =
{
	rmc_init,		"rmc_init",
	rmc_queue_packet,	"rmc_queue_packet",
	rmc_under_limit,	"rmc_under_limit",
	rmc_update_class_util,	"rmc_update_class_util",
	rmc_delay_action,	"rmc_delay_action",
	rmc_restart,		"rmc_restart",
	_rmc_wrr_dequeue_next,	"_rmc_wrr_dequeue_next",
	NULL,			NULL
};

static char *rmc_funcname(void *func)
{
	struct rmc_funcs *fp;

	for (fp = rmc_funcs; fp->func != NULL; fp++)
		if (fp->func == func)
			return (fp->name);
	return ("unknown");
}

void cbqtrace_dump(int counter)
{
	int	 i, *p;
	char	*cp;

	counter = counter % NCBQTRACE;
	p = (int *)&cbqtrace_buffer[counter];

	for (i=0; i<20; i++) {
		printf("[0x%x] ", *p++);
		printf("%s: ", rmc_funcname((void *)*p++));
		cp = (char *)p++;
		printf("%c%c%c%c: ", cp[0], cp[1], cp[2], cp[3]);
		printf("%d\n",*p++);

		if (p >= (int *)&cbqtrace_buffer[NCBQTRACE])
			p = (int *)cbqtrace_buffer;
	}
}
#endif /* CBQ_TRACE */
#endif /* ALTQ_CBQ */

#if defined(ALTQ_CBQ) || defined(ALTQ_RED) || defined(ALTQ_RIO) || defined(ALTQ_HFSC) || defined(ALTQ_PRIQ)
#if !defined(__GNUC__) || defined(ALTQ_DEBUG)

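/*
 * The generic queue routines below keep each class queue as a circular
 * singly-linked list of mbufs chained through m_nextpkt: qtail(q)
 * points at the last packet and qtail(q)->m_nextpkt is the head, so
 * both enqueue at the tail and dequeue at the head are O(1).  An
 * empty queue is represented by qtail(q) == NULL.
 */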
void
_addq(class_queue_t *q, mbuf_t *m)
{
	mbuf_t	*m0;

	if ((m0 = qtail(q)) != NULL)
		m->m_nextpkt = m0->m_nextpkt;
	else
		m0 = m;
	m0->m_nextpkt = m;
	qtail(q) = m;
	qlen(q)++;
}

mbuf_t *
_getq(class_queue_t *q)
{
	mbuf_t	*m, *m0;

	if ((m = qtail(q)) == NULL)
		return (NULL);
	if ((m0 = m->m_nextpkt) != m)
		m->m_nextpkt = m0->m_nextpkt;
	else {
		ASSERT(qlen(q) == 1);
		qtail(q) = NULL;
	}
	qlen(q)--;
	m0->m_nextpkt = NULL;
	return (m0);
}

/* drop a packet at the tail of the queue */
mbuf_t *
_getq_tail(class_queue_t *q)
{
	mbuf_t	*m, *m0, *prev;

	if ((m = m0 = qtail(q)) == NULL)
		return NULL;
	do {
		prev = m0;
		m0 = m0->m_nextpkt;
	} while (m0 != m);
	prev->m_nextpkt = m->m_nextpkt;
	if (prev == m) {
		ASSERT(qlen(q) == 1);
		qtail(q) = NULL;
	} else
		qtail(q) = prev;
	qlen(q)--;
	m->m_nextpkt = NULL;
	return (m);
}

/* randomly select a packet in the queue */
mbuf_t *
_getq_random(class_queue_t *q)
{
	struct mbuf	*m;
	int		 i, n;

	if ((m = qtail(q)) == NULL)
		return NULL;
	if (m->m_nextpkt == m) {
		ASSERT(qlen(q) == 1);
		qtail(q) = NULL;
	} else {
		struct mbuf *prev = NULL;

		n = arc4random() % qlen(q) + 1;
		for (i = 0; i < n; i++) {
			prev = m;
			m = m->m_nextpkt;
		}
		prev->m_nextpkt = m->m_nextpkt;
		if (m == qtail(q))
			qtail(q) = prev;
	}
	qlen(q)--;
	m->m_nextpkt = NULL;
	return (m);
}

void
_removeq(class_queue_t *q, mbuf_t *m)
{
	mbuf_t	*m0, *prev;

	m0 = qtail(q);
	do {
		prev = m0;
		m0 = m0->m_nextpkt;
	} while (m0 != m);
	prev->m_nextpkt = m->m_nextpkt;
	if (prev == m)
		qtail(q) = NULL;
	else if (qtail(q) == m)
		qtail(q) = prev;
	qlen(q)--;
}

void
_flushq(class_queue_t *q)
{
	mbuf_t *m;

	while ((m = _getq(q)) != NULL)
		m_freem(m);
	ASSERT(qlen(q) == 0);
}

#endif /* !__GNUC__ || ALTQ_DEBUG */
#endif /* ALTQ_CBQ || ALTQ_RED || ALTQ_RIO || ALTQ_HFSC || ALTQ_PRIQ */
