/*	$NetBSD: altq_rmclass.c,v 1.30 2024/02/09 22:08:31 andvar Exp $	*/
/*	$KAME: altq_rmclass.c,v 1.19 2005/04/13 03:44:25 suz Exp $	*/

/*
 * Copyright (c) 1991-1997 Regents of the University of California.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *      This product includes software developed by the Network Research
 *      Group at Lawrence Berkeley Laboratory.
 * 4. Neither the name of the University nor of the Laboratory may be used
 *    to endorse or promote products derived from this software without
 *    specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * LBL code modified by speer@eng.sun.com, May 1997.
 * For questions and/or comments, please send mail to cbq@ee.lbl.gov
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: altq_rmclass.c,v 1.30 2024/02/09 22:08:31 andvar Exp $");

/* #ident "@(#)rm_class.c  1.48     97/12/05 SMI" */

#ifdef _KERNEL_OPT
#include "opt_altq.h"
#include "opt_inet.h"
#endif

#ifdef ALTQ_CBQ	/* cbq is enabled by ALTQ_CBQ option in opt_altq.h */

#include <sys/param.h>
#include <sys/malloc.h>
#include <sys/mbuf.h>
#include <sys/socket.h>
#include <sys/systm.h>
#include <sys/errno.h>
#include <sys/time.h>
#ifdef ALTQ3_COMPAT
#include <sys/kernel.h>
#endif
#include <sys/cprng.h>

#include <net/if.h>
#include <net/if_types.h>
#ifdef ALTQ3_COMPAT
#include <netinet/in.h>
#include <netinet/in_systm.h>
#include <netinet/ip.h>
#endif

#include <altq/altq.h>
#include <altq/altq_rmclass.h>
#include <altq/altq_rmclass_debug.h>
#include <altq/altq_red.h>
#include <altq/altq_rio.h>

/*
 * Local Macros
 */

#define	reset_cutoff(ifd)	{ ifd->cutoff_ = RM_MAXDEPTH; }

#define	PSEC_TO_NSEC(t)	((t) / 1000)

/*
 * Local routines.
 */

static int	rmc_satisfied(struct rm_class *, struct timespec *);
static void	rmc_wrr_set_weights(struct rm_ifdat *);
static void	rmc_depth_compute(struct rm_class *);
static void	rmc_depth_recompute(rm_class_t *);

static mbuf_t	*_rmc_wrr_dequeue_next(struct rm_ifdat *, int);
static mbuf_t	*_rmc_prr_dequeue_next(struct rm_ifdat *, int);

static int	_rmc_addq(rm_class_t *, mbuf_t *);
static void	_rmc_dropq(rm_class_t *);
static mbuf_t	*_rmc_getq(rm_class_t *);
static mbuf_t	*_rmc_pollq(rm_class_t *);

static int	rmc_under_limit(struct rm_class *, struct timespec *);
static void	rmc_tl_satisfied(struct rm_ifdat *, struct timespec *);
static void	rmc_drop_action(struct rm_class *);
static void	rmc_restart(struct rm_class *);
static void	rmc_root_overlimit(struct rm_class *, struct rm_class *);

#define	BORROW_OFFTIME
/*
 * BORROW_OFFTIME (experimental):
 * borrow the offtime of the class being borrowed from.
 * the reason is that when its own offtime is set, the class is unable
 * to borrow much, especially when cutoff is taking effect.
 * but when the borrowed class is overloaded (avgidle is close to minidle),
 * use the borrowing class's offtime to avoid overload.
 */
#define	ADJUST_CUTOFF
/*
 * ADJUST_CUTOFF (experimental):
 * if no underlimit class is found due to cutoff, increase cutoff and
 * retry the scheduling loop.
 * also, don't invoke delay_actions while cutoff is taking effect,
 * since a sleeping class won't have a chance to be scheduled in the
 * next loop.
 *
 * now the heuristics for setting the top-level variable (cutoff_) become:
 *	1. if a packet arrives for a not-overlimit class, set cutoff
 *	   to the depth of the class.
 *	2. if cutoff is i, and a packet arrives for an overlimit class
 *	   with an underlimit ancestor at a lower level than i (say j),
 *	   then set cutoff to j.
 *	3. at scheduling a packet, if there is no underlimit class
 *	   due to the current cutoff level, increase cutoff by 1 and
 *	   then try to schedule again.
 */

/*
 * rm_class_t *
 * rmc_newclass(...) - Create a new resource management class at priority
 * 'pri' on the interface given by 'ifd'.
 *
 * psecPerByte  is the data rate of the interface in picoseconds/byte.
 *              E.g., 800000 for a 10Mb/s ethernet.  (The derivation
 *              below is written in terms of the equivalent rate in
 *              nanoseconds/byte, nsecPerByte = psecPerByte / 1000.)
 *              If the class gets less than 100% of the bandwidth, this
 *              number should be the 'effective' rate for the class.
 *              Let f be the bandwidth fraction allocated to this class,
 *              and let nsPerByte be the data rate of the output link in
 *              nanoseconds/byte.  Then nsecPerByte is set to
 *              nsPerByte / f.  E.g., 1600 (= 800 / .5)
 *              for a class that gets 50% of an ethernet's bandwidth.
 *
 * action       the routine to call when the class is over limit.
 *
 * maxq         max allowable queue size for class (in packets).
 *
 * parent       parent class pointer.
 *
 * borrow       class to borrow from (should be either 'parent' or null).
 *
 * maxidle      max value allowed for class 'idle' time estimate (this
 *              parameter determines how large an initial burst of packets
 *              can be before overlimit action is invoked).
 *
 * offtime      how long 'delay' action will delay when class goes over
 *              limit (this parameter determines the steady-state burst
 *              size when a class is running over its limit).
 *
 * Maxidle and offtime have to be computed from the following:  If the
 * average packet size is s, the bandwidth fraction allocated to this
 * class is f, we want to allow b packet bursts, and the gain of the
 * averaging filter is g (= 1 - 2^(-RM_FILTER_GAIN)), then:
 *
 *   ptime = s * nsPerByte * (1 - f) / f
 *   maxidle = ptime * (1 - g^b) / g^b
 *   minidle = -ptime * (1 / (f - 1))
 *   offtime = ptime * (1 + 1/(1 - g) * (1 - g^(b - 1)) / g^(b - 1))
 *
 * Operationally, it's convenient to specify maxidle & offtime in units
 * independent of the link bandwidth so the maxidle & offtime passed to
 * this routine are the above values multiplied by 8*f/(1000*nsPerByte).
 * (The constant factor is a scale factor needed to make the parameters
 * integers.  This scaling also means that the 'unscaled' values of
 * maxidle*nsecPerByte/8 and offtime*nsecPerByte/8 will be in microseconds,
 * not nanoseconds.)  Also note that the 'idle' filter computation keeps
 * an estimate scaled upward by 2^RM_FILTER_GAIN so the passed value of
 * maxidle also must be scaled upward by this value.  Thus, the passed
 * values for maxidle and offtime can be computed as follows:
 *
 * maxidle = maxidle * 2^RM_FILTER_GAIN * 8 / (1000 * nsecPerByte)
 * offtime = offtime * 8 / (1000 * nsecPerByte)
 *
 * When USE_HRTIME is employed, then maxidle and offtime become:
 * 	maxidle = maxidle * (8.0 / nsecPerByte);
 * 	offtime = offtime * (8.0 / nsecPerByte);
 *
 * (A worked example of the scaling actually performed by this routine
 * follows rmc_newclass() below.)
 */
struct rm_class *
rmc_newclass(int pri, struct rm_ifdat *ifd, uint64_t psecPerByte,
    void (*action)(rm_class_t *, rm_class_t *), int maxq,
    struct rm_class *parent, struct rm_class *borrow, u_int maxidle,
    int minidle, u_int offtime, int pktsize, int flags)
{
	struct rm_class	*cl;
	struct rm_class	*peer;
	int		 s;

	if (pri >= RM_MAXPRIO)
		return (NULL);
#ifndef ALTQ_RED
	if (flags & RMCF_RED) {
#ifdef ALTQ_DEBUG
		printf("rmc_newclass: RED not configured for CBQ!\n");
#endif
		return (NULL);
	}
#endif
#ifndef ALTQ_RIO
	if (flags & RMCF_RIO) {
#ifdef ALTQ_DEBUG
		printf("rmc_newclass: RIO not configured for CBQ!\n");
#endif
		return (NULL);
	}
#endif

	cl = malloc(sizeof(struct rm_class), M_DEVBUF, M_WAITOK|M_ZERO);
	if (cl == NULL)
		return (NULL);
	CALLOUT_INIT(&cl->callout_);

	cl->q_ = malloc(sizeof(class_queue_t), M_DEVBUF, M_WAITOK|M_ZERO);
	if (cl->q_ == NULL) {
		free(cl, M_DEVBUF);
		return (NULL);
	}

	/*
	 * Class initialization.
	 */
	cl->children_ = NULL;
	cl->parent_ = parent;
	cl->borrow_ = borrow;
	cl->leaf_ = 1;
	cl->ifdat_ = ifd;
	cl->pri_ = pri;
	cl->allotment_ = (u_int)(RM_PS_PER_SEC / psecPerByte); /* Bytes per sec */
	cl->depth_ = 0;
	cl->qthresh_ = 0;
	cl->ps_per_byte_ = psecPerByte;

	qlimit(cl->q_) = maxq;
	qtype(cl->q_) = Q_DROPHEAD;
	qlen(cl->q_) = 0;
	cl->flags_ = flags;

#if 1 /* minidle is also scaled in ALTQ */
	cl->minidle_ = ((int64_t)minidle * (int64_t)psecPerByte) / 8;
	if (cl->minidle_ > 0)
		cl->minidle_ = 0;
#else
	cl->minidle_ = minidle;
#endif
	cl->maxidle_ = ((int64_t)maxidle * (int64_t)psecPerByte) / 8;
	if (cl->maxidle_ == 0)
		cl->maxidle_ = 1;
#if 1 /* offtime is also scaled in ALTQ */
	cl->avgidle_ = cl->maxidle_;
	cl->offtime_ = (((int64_t)offtime * (int64_t)psecPerByte) / 8) >> RM_FILTER_GAIN;
	if (cl->offtime_ == 0)
		cl->offtime_ = 1;
#else
	cl->avgidle_ = 0;
	cl->offtime_ = (offtime * nsecPerByte) / 8;
#endif
	cl->overlimit = action;

#ifdef ALTQ_RED
	if (flags & (RMCF_RED|RMCF_RIO)) {
		int red_flags, red_pkttime;

		red_flags = 0;
		if (flags & RMCF_ECN)
			red_flags |= REDF_ECN;
		if (flags & RMCF_FLOWVALVE)
			red_flags |= REDF_FLOWVALVE;
#ifdef ALTQ_RIO
		if (flags & RMCF_CLEARDSCP)
			red_flags |= RIOF_CLEARDSCP;
#endif
		red_pkttime = PSEC_TO_NSEC(psecPerByte) * pktsize / 1000;

		if (flags & RMCF_RED) {
			cl->red_ = red_alloc(0, 0,
			    qlimit(cl->q_) * 10/100,
			    qlimit(cl->q_) * 30/100,
			    red_flags, red_pkttime);
			if (cl->red_ != NULL)
				qtype(cl->q_) = Q_RED;
		}
#ifdef ALTQ_RIO
		else {
			cl->red_ = (red_t *)rio_alloc(0, NULL,
						      red_flags, red_pkttime);
			if (cl->red_ != NULL)
				qtype(cl->q_) = Q_RIO;
		}
#endif
	}
#endif /* ALTQ_RED */

	/*
	 * put the class into the class tree
	 */
	s = splnet();
	if ((peer = ifd->active_[pri]) != NULL) {
		/* find the last class at this pri */
		cl->peer_ = peer;
		while (peer->peer_ != ifd->active_[pri])
			peer = peer->peer_;
		peer->peer_ = cl;
	} else {
		ifd->active_[pri] = cl;
		cl->peer_ = cl;
	}

	if (cl->parent_) {
		cl->next_ = parent->children_;
		parent->children_ = cl;
		parent->leaf_ = 0;
	}

	/*
	 * Compute the depth of this class and its ancestors in the class
	 * hierarchy.
	 */
	rmc_depth_compute(cl);

	/*
	 * If CBQ's WRR is enabled, then initialize the class WRR state.
	 */
	if (ifd->wrr_) {
		ifd->num_[pri]++;
		ifd->alloc_[pri] += cl->allotment_;
		rmc_wrr_set_weights(ifd);
	}
	splx(s);
	return (cl);
}
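
/*
 * Illustrative sketch (not compiled): a worked instance of the
 * maxidle/offtime scaling described above, using the same arithmetic
 * as rmc_newclass().  The input values (a 10Mb/s link, i.e.
 * psecPerByte = 800000, and maxidle = 20, offtime = 30 in the scaled
 * units passed in by userland) are assumptions chosen for the example
 * only.
 */
#if 0
static void
rmc_scaling_example(void)
{
	uint64_t psecPerByte = 800000;	/* 10Mb/s => 800 ns/byte = 800000 ps/byte */
	u_int maxidle = 20, offtime = 30;
	int64_t maxidle_, offtime_;

	/* identical to the computation in rmc_newclass() */
	maxidle_ = ((int64_t)maxidle * (int64_t)psecPerByte) / 8;
	offtime_ = (((int64_t)offtime * (int64_t)psecPerByte) / 8) >> RM_FILTER_GAIN;

	/*
	 * maxidle_ = 20 * 800000 / 8 = 2000000 psec (2 usec);
	 * offtime_ = (30 * 800000 / 8) >> RM_FILTER_GAIN.
	 */
	printf("maxidle=%lld psec, offtime=%lld psec\n",
	    (long long)maxidle_, (long long)offtime_);
}
#endif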

int
rmc_modclass(struct rm_class *cl, uint64_t psecPerByte, int maxq, u_int maxidle,
    int minidle, u_int offtime, int pktsize)
{
	struct rm_ifdat	*ifd;
	u_int		 old_allotment;
	int		 s;

	ifd = cl->ifdat_;
	old_allotment = cl->allotment_;

	s = splnet();
	cl->allotment_ = (u_int)(RM_PS_PER_SEC / psecPerByte); /* Bytes per sec */
	cl->qthresh_ = 0;
	cl->ps_per_byte_ = psecPerByte;

	qlimit(cl->q_) = maxq;

#if 1 /* minidle is also scaled in ALTQ */
	cl->minidle_ = ((int64_t)minidle * (int64_t)psecPerByte) / 8;
	if (cl->minidle_ > 0)
		cl->minidle_ = 0;
#else
	cl->minidle_ = minidle;
#endif
	cl->maxidle_ = ((int64_t)maxidle * (int64_t)psecPerByte) / 8;
	if (cl->maxidle_ == 0)
		cl->maxidle_ = 1;
#if 1 /* offtime is also scaled in ALTQ */
	cl->avgidle_ = cl->maxidle_;
	cl->offtime_ = (((int64_t)offtime * (int64_t)psecPerByte) / 8) >> RM_FILTER_GAIN;
	if (cl->offtime_ == 0)
		cl->offtime_ = 1;
#else
	cl->avgidle_ = 0;
	cl->offtime_ = (offtime * nsecPerByte) / 8;
#endif

	/*
	 * If CBQ's WRR is enabled, then initialize the class WRR state.
	 */
	if (ifd->wrr_) {
		ifd->alloc_[cl->pri_] += cl->allotment_ - old_allotment;
		rmc_wrr_set_weights(ifd);
	}
	splx(s);
	return (0);
}

/*
 * static void
 * rmc_wrr_set_weights(struct rm_ifdat *ifdat) - This function computes
 *	the appropriate round robin weights for the CBQ weighted round robin
 *	algorithm.
 *
 *	Returns: NONE
 */

static void
rmc_wrr_set_weights(struct rm_ifdat *ifd)
{
	int		i;
	struct rm_class	*cl, *clh;

	for (i = 0; i < RM_MAXPRIO; i++) {
		/*
		 * This is inverted from that of the simulator to
		 * maintain precision.
		 */
		if (ifd->num_[i] == 0)
			ifd->M_[i] = 0;
		else
			ifd->M_[i] = ifd->alloc_[i] /
				(ifd->num_[i] * ifd->maxpkt_);
		/*
		 * Compute the weighted allotment for each class.
		 * This takes the expensive div instruction out
		 * of the main loop for the wrr scheduling path.
		 * These only get recomputed when a class comes or
		 * goes.
		 */
		if (ifd->active_[i] != NULL) {
			clh = cl = ifd->active_[i];
			do {
				/* safe-guard for slow link or alloc_ == 0 */
				if (ifd->M_[i] == 0)
					cl->w_allotment_ = 0;
				else
					cl->w_allotment_ = cl->allotment_ /
						ifd->M_[i];
				cl = cl->peer_;
			} while ((cl != NULL) && (cl != clh));
		}
	}
}

int
rmc_get_weight(struct rm_ifdat *ifd, int pri)
{
	if ((pri >= 0) && (pri < RM_MAXPRIO))
		return (ifd->M_[pri]);
	else
		return (0);
}

/*
 * static void
 * rmc_depth_compute(struct rm_class *cl) - This function computes the
 *	appropriate depth of class 'cl' and its ancestors.
 *
 *	Returns:	NONE
 */

static void
rmc_depth_compute(struct rm_class *cl)
{
	rm_class_t	*t = cl, *p;

	/*
	 * Recompute the depth for the branch of the tree.
	 */
	while (t != NULL) {
		p = t->parent_;
		if (p && (t->depth_ >= p->depth_)) {
			p->depth_ = t->depth_ + 1;
			t = p;
		} else
			t = NULL;
	}
}

/*
 * static void
 * rmc_depth_recompute(struct rm_class *cl) - This function re-computes
 *	the depth of the tree after a class has been deleted.
 *
 *	Returns:	NONE
 */

static void
rmc_depth_recompute(rm_class_t *cl)
{
#if 1 /* ALTQ */
	rm_class_t	*p, *t;

	p = cl;
	while (p != NULL) {
		if ((t = p->children_) == NULL) {
			p->depth_ = 0;
		} else {
			int cdepth = 0;

			while (t != NULL) {
				if (t->depth_ > cdepth)
					cdepth = t->depth_;
				t = t->next_;
			}

			if (p->depth_ == cdepth + 1)
				/* no change to this parent */
				return;

			p->depth_ = cdepth + 1;
		}

		p = p->parent_;
	}
#else
	rm_class_t	*t;

	if (cl->depth_ >= 1) {
		if (cl->children_ == NULL) {
			cl->depth_ = 0;
		} else if ((t = cl->children_) != NULL) {
			while (t != NULL) {
				if (t->children_ != NULL)
					rmc_depth_recompute(t);
				t = t->next_;
			}
		} else
			rmc_depth_compute(cl);
	}
#endif
}

/*
 * void
 * rmc_delete_class(struct rm_ifdat *ifdat, struct rm_class *cl) - This
 *	function deletes a class from the link-sharing structure and frees
 *	all resources associated with the class.
 *
 *	Returns: NONE
 */

void
rmc_delete_class(struct rm_ifdat *ifd, struct rm_class *cl)
{
	struct rm_class	*p, *head, *previous;
	int		 s;

	ASSERT(cl->children_ == NULL);

	if (cl->sleeping_)
		CALLOUT_STOP(&cl->callout_);

	s = splnet();
	/*
	 * Free packets in the packet queue.
	 * XXX - this may not be a desired behavior.  Packets should be
	 *		re-queued.
	 */
	rmc_dropall(cl);

	/*
	 * If the class has a parent, then remove the class from its
	 * parent's children chain.
	 */
	if (cl->parent_ != NULL) {
		head = cl->parent_->children_;
		p = previous = head;
		if (head->next_ == NULL) {
			ASSERT(head == cl);
			cl->parent_->children_ = NULL;
			cl->parent_->leaf_ = 1;
		} else while (p != NULL) {
			if (p == cl) {
				if (cl == head)
					cl->parent_->children_ = cl->next_;
				else
					previous->next_ = cl->next_;
				cl->next_ = NULL;
				p = NULL;
			} else {
				previous = p;
				p = p->next_;
			}
		}
	}

	/*
	 * Delete class from class priority peer list.
	 */
	if ((p = ifd->active_[cl->pri_]) != NULL) {
		/*
		 * If there is more than one member of this priority
		 * level, then look for class(cl) in the priority level.
		 */
		if (p != p->peer_) {
			while (p->peer_ != cl)
				p = p->peer_;
			p->peer_ = cl->peer_;

			if (ifd->active_[cl->pri_] == cl)
				ifd->active_[cl->pri_] = cl->peer_;
		} else {
			ASSERT(p == cl);
			ifd->active_[cl->pri_] = NULL;
		}
	}

	/*
	 * Recompute the WRR weights.
	 */
	if (ifd->wrr_) {
		ifd->alloc_[cl->pri_] -= cl->allotment_;
		ifd->num_[cl->pri_]--;
		rmc_wrr_set_weights(ifd);
	}

	/*
	 * Re-compute the depth of the tree.
	 */
#if 1 /* ALTQ */
	rmc_depth_recompute(cl->parent_);
#else
	rmc_depth_recompute(ifd->root_);
#endif

	splx(s);

	/*
	 * Free the class structure.
	 */
	if (cl->red_ != NULL) {
#ifdef ALTQ_RIO
		if (q_is_rio(cl->q_))
			rio_destroy((rio_t *)cl->red_);
#endif
#ifdef ALTQ_RED
		if (q_is_red(cl->q_))
			red_destroy(cl->red_);
#endif
	}
	free(cl->q_, M_DEVBUF);
	free(cl, M_DEVBUF);
}


/*
 * int
 * rmc_init(...) - Initialize the resource management data structures
 *	associated with the output portion of interface 'ifp'.  'ifd' is
 *	where the structures will be built (for backwards compatibility, the
 *	structures aren't kept in the ifnet struct).  'psecPerByte'
 *	gives the link speed (inverse of bandwidth) in picoseconds/byte.
 *	'restart' is the driver-specific routine that the generic 'delay
 *	until under limit' action will call to restart output.  `maxq'
 *	is the queue size of the 'link' & 'default' classes.  'maxqueued'
 *	is the maximum number of packets that the resource management
 *	code will allow to be queued 'downstream' (this is typically 1).
 *
 *	Returns:	0 on success
 */

int
rmc_init(struct ifaltq *ifq, struct rm_ifdat *ifd, uint64_t psecPerByte,
    void (*restart)(struct ifaltq *), int maxq, int maxqueued, u_int maxidle,
    int minidle, u_int offtime, int flags)
{
	int i, mtu;

	/*
	 * Initialize the CBQ tracing/debug facility.
	 */
	CBQTRACEINIT();

	mtu = ifq->altq_ifp->if_mtu;
	if (mtu < 1) {
		printf("altq: %s: invalid MTU (interface not initialized?)\n",
		    ifq->altq_ifp->if_xname);
		return (EINVAL);
	}

	(void)memset((char *)ifd, 0, sizeof (*ifd));
	ifd->ifq_ = ifq;
	ifd->restart = restart;
	ifd->maxqueued_ = maxqueued;
	ifd->ps_per_byte_ = psecPerByte;
	ifd->maxpkt_ = mtu;
	ifd->wrr_ = (flags & RMCF_WRR) ? 1 : 0;
	ifd->efficient_ = (flags & RMCF_EFFICIENT) ? 1 : 0;
#if 1
	ifd->maxiftime_ = mtu * psecPerByte / 1000 / 1000 * 16;
	if ((int64_t)mtu * psecPerByte > (int64_t)10 * 1000000000)
		ifd->maxiftime_ /= 4;
#endif

	reset_cutoff(ifd);
	CBQTRACE(rmc_init, 'INIT', ifd->cutoff_);

	/*
	 * Initialize the CBQ's WRR state.
	 */
	for (i = 0; i < RM_MAXPRIO; i++) {
		ifd->alloc_[i] = 0;
		ifd->M_[i] = 0;
		ifd->num_[i] = 0;
		ifd->na_[i] = 0;
		ifd->active_[i] = NULL;
	}

	/*
	 * Initialize current packet state.
	 */
	ifd->qi_ = 0;
	ifd->qo_ = 0;
	for (i = 0; i < RM_MAXQUEUED; i++) {
		ifd->class_[i] = NULL;
		ifd->curlen_[i] = 0;
		ifd->borrowed_[i] = NULL;
	}

	/*
	 * Create the root class of the link-sharing structure.
	 */
	if ((ifd->root_ = rmc_newclass(0, ifd,
				       psecPerByte,
				       rmc_root_overlimit, maxq, 0, 0,
				       maxidle, minidle, offtime,
				       0, 0)) == NULL) {
		printf("rmc_init: root class not allocated\n");
		return (ENOMEM);
	}
	ifd->root_->depth_ = 0;

	return (0);
}

/*
 * void
 * rmc_queue_packet(struct rm_class *cl, mbuf_t *m) - Add packet given by
 *	mbuf 'm' to queue for resource class 'cl'.  This routine is called
 *	by a driver's if_output routine.  This routine must be called with
 *	output packet completion interrupts locked out (to avoid racing with
 *	rmc_dequeue_next).
 *
 *	Returns:	0 on successful queueing
 *			-1 when packet drop occurs
 */
int
rmc_queue_packet(struct rm_class *cl, mbuf_t *m)
{
	struct timespec	 now;
	struct rm_ifdat *ifd = cl->ifdat_;
	int		 cpri = cl->pri_;
	int		 is_empty = qempty(cl->q_);

	RM_GETTIME(now);
	if (ifd->cutoff_ > 0) {
		if (TS_LT(&cl->undertime_, &now)) {
			if (ifd->cutoff_ > cl->depth_)
				ifd->cutoff_ = cl->depth_;
			CBQTRACE(rmc_queue_packet, 'ffoc', cl->depth_);
		}
#if 1 /* ALTQ */
		else {
			/*
			 * the class is overlimit. if the class has
			 * underlimit ancestors, set cutoff to the lowest
			 * depth among them.
			 */
			struct rm_class *borrow = cl->borrow_;

			while (borrow != NULL &&
			       borrow->depth_ < ifd->cutoff_) {
				if (TS_LT(&borrow->undertime_, &now)) {
					ifd->cutoff_ = borrow->depth_;
					CBQTRACE(rmc_queue_packet, 'ffob', ifd->cutoff_);
					break;
				}
				borrow = borrow->borrow_;
			}
		}
#else /* !ALTQ */
		else if ((ifd->cutoff_ > 1) && cl->borrow_) {
			if (TS_LT(&cl->borrow_->undertime_, &now)) {
				ifd->cutoff_ = cl->borrow_->depth_;
				CBQTRACE(rmc_queue_packet, 'ffob',
					 cl->borrow_->depth_);
			}
		}
#endif /* !ALTQ */
	}

	if (_rmc_addq(cl, m) < 0)
		/* failed */
		return (-1);

	if (is_empty) {
		CBQTRACE(rmc_queue_packet, 'ytpe', cl->stats_.handle);
		ifd->na_[cpri]++;
	}

	if (qlen(cl->q_) > qlimit(cl->q_)) {
		/* note: qlimit can be set to 0 or 1 */
		rmc_drop_action(cl);
		return (-1);
	}
	return (0);
}

/*
 * void
 * rmc_tl_satisfied(struct rm_ifdat *ifd, struct timespec *now) - Check all
 *	classes to see if they are satisfied.
 */

static void
rmc_tl_satisfied(struct rm_ifdat *ifd, struct timespec *now)
{
	int		 i;
	rm_class_t	*p, *bp;

	for (i = RM_MAXPRIO - 1; i >= 0; i--) {
		if ((bp = ifd->active_[i]) != NULL) {
			p = bp;
			do {
				if (!rmc_satisfied(p, now)) {
					ifd->cutoff_ = p->depth_;
					return;
				}
				p = p->peer_;
			} while (p != bp);
		}
	}

	reset_cutoff(ifd);
}

/*
 * rmc_satisfied - Return 1 if the class is satisfied, 0 otherwise.
 */

static int
rmc_satisfied(struct rm_class *cl, struct timespec *now)
{
	rm_class_t	*p;

	if (cl == NULL)
		return (1);
	if (TS_LT(now, &cl->undertime_))
		return (1);
	if (cl->depth_ == 0) {
		if (!cl->sleeping_ && (qlen(cl->q_) > cl->qthresh_))
			return (0);
		else
			return (1);
	}
	if (cl->children_ != NULL) {
		p = cl->children_;
		while (p != NULL) {
			if (!rmc_satisfied(p, now))
				return (0);
			p = p->next_;
		}
	}

	return (1);
}

/*
 * Return 1 if class 'cl' is under limit or can borrow from a parent,
 * 0 if overlimit.  As a side-effect, this routine will invoke the
 * class overlimit action if the class is overlimit.
 */

static int
rmc_under_limit(struct rm_class *cl, struct timespec *now)
{
	rm_class_t	*p = cl;
	rm_class_t	*top;
	struct rm_ifdat	*ifd = cl->ifdat_;

	ifd->borrowed_[ifd->qi_] = NULL;
	/*
	 * If cl is the root class, then always return that it is
	 * underlimit.  Otherwise, check to see if the class is underlimit.
	 */
	if (cl->parent_ == NULL)
		return (1);

	if (cl->sleeping_) {
		if (TS_LT(now, &cl->undertime_))
			return (0);

		CALLOUT_STOP(&cl->callout_);
		cl->sleeping_ = 0;
		cl->undertime_.tv_sec = 0;
		return (1);
	}

	top = NULL;
	while (cl->undertime_.tv_sec && TS_LT(now, &cl->undertime_)) {
		if (((cl = cl->borrow_) == NULL) ||
		    (cl->depth_ > ifd->cutoff_)) {
#ifdef ADJUST_CUTOFF
			if (cl != NULL)
				/* cutoff is taking effect, just
				   return false without calling
				   the delay action. */
				return (0);
#endif
#ifdef BORROW_OFFTIME
			/*
			 * check if the class can borrow offtime too.
			 * borrow offtime from the top of the borrow
			 * chain if the top class is not overloaded.
			 */
			if (cl != NULL) {
				/* cutoff is taking effect, use this class as top. */
				top = cl;
				CBQTRACE(rmc_under_limit, 'ffou', ifd->cutoff_);
			}
			if (top != NULL && top->avgidle_ == top->minidle_)
				top = NULL;
			p->overtime_ = *now;
			(p->overlimit)(p, top);
#else
			p->overtime_ = *now;
			(p->overlimit)(p, NULL);
#endif
			return (0);
		}
		top = cl;
	}

	if (cl != p)
		ifd->borrowed_[ifd->qi_] = cl;
	return (1);
}

/*
 * _rmc_wrr_dequeue_next() - This is the scheduler for WRR as opposed to
 *	packet-by-packet round robin.
 *
 * The heart of the weighted round-robin scheduler, which decides which
 * class next gets to send a packet.  Highest priority first, then
 * weighted round-robin within priorities.
 *
 * Each able-to-send class gets to send until its byte allocation is
 * exhausted.  Thus, the active pointer is only changed after a class has
 * exhausted its allocation.
 *
 * If the scheduler finds no class that is underlimit or able to borrow,
 * then the first class found that had a nonzero queue and is allowed to
 * borrow gets to send.
 */

static mbuf_t *
_rmc_wrr_dequeue_next(struct rm_ifdat *ifd, int op)
{
	struct rm_class	*cl = NULL, *first = NULL;
	u_int		 deficit;
	int		 cpri;
	mbuf_t		*m;
	struct timespec	 now;

	RM_GETTIME(now);

	/*
	 * if the driver polls the top of the queue and then removes
	 * the polled packet, we must return the same packet.
	 */
	if (op == ALTDQ_REMOVE && ifd->pollcache_) {
		cl = ifd->pollcache_;
		cpri = cl->pri_;
		if (ifd->efficient_) {
			/* check if this class is overlimit */
			if (cl->undertime_.tv_sec != 0 &&
			    rmc_under_limit(cl, &now) == 0)
				first = cl;
		}
		ifd->pollcache_ = NULL;
		goto _wrr_out;
	} else {
		/* mode == ALTDQ_POLL || pollcache == NULL */
		ifd->pollcache_ = NULL;
		ifd->borrowed_[ifd->qi_] = NULL;
	}
#ifdef ADJUST_CUTOFF
 _again:
#endif
	for (cpri = RM_MAXPRIO - 1; cpri >= 0; cpri--) {
		if (ifd->na_[cpri] == 0)
			continue;
		deficit = 0;
		/*
		 * Loop through twice for a priority level, if some class
		 * was unable to send a packet the first round because
		 * of the weighted round-robin mechanism.
		 * During the second loop at this level, deficit==2.
		 * (This second loop is not needed if for every class,
		 * "M[cl->pri_]" times "cl->allotment" is greater than
		 * the byte size for the largest packet in the class.)
		 */
 _wrr_loop:
		cl = ifd->active_[cpri];
		ASSERT(cl != NULL);
		do {
			if ((deficit < 2) && (cl->bytes_alloc_ <= 0))
				cl->bytes_alloc_ += cl->w_allotment_;
			if (!qempty(cl->q_)) {
				if ((cl->undertime_.tv_sec == 0) ||
				    rmc_under_limit(cl, &now)) {
					if (cl->bytes_alloc_ > 0 || deficit > 1)
						goto _wrr_out;

					/* underlimit but no alloc */
					deficit = 1;
#if 1
					ifd->borrowed_[ifd->qi_] = NULL;
#endif
				}
				else if (first == NULL && cl->borrow_ != NULL)
					first = cl; /* borrowing candidate */
			}

			cl->bytes_alloc_ = 0;
			cl = cl->peer_;
		} while (cl != ifd->active_[cpri]);

		if (deficit == 1) {
			/* first loop found an underlimit class with deficit */
			/* Loop on same priority level, with new deficit.  */
			deficit = 2;
			goto _wrr_loop;
		}
	}

#ifdef ADJUST_CUTOFF
	/*
	 * no underlimit class found.  if cutoff is taking effect,
	 * increase cutoff and try again.
	 */
	if (first != NULL && ifd->cutoff_ < ifd->root_->depth_) {
		ifd->cutoff_++;
		CBQTRACE(_rmc_wrr_dequeue_next, 'ojda', ifd->cutoff_);
		goto _again;
	}
#endif /* ADJUST_CUTOFF */
	/*
	 * If LINK_EFFICIENCY is turned on, then the first overlimit
	 * class we encounter will send a packet if all the classes
	 * of the link-sharing structure are overlimit.
	 */
	reset_cutoff(ifd);
	CBQTRACE(_rmc_wrr_dequeue_next, 'otsr', ifd->cutoff_);

	if (!ifd->efficient_ || first == NULL)
		return (NULL);

	cl = first;
	cpri = cl->pri_;
#if 0	/* too time-consuming for nothing */
	if (cl->sleeping_)
		CALLOUT_STOP(&cl->callout_);
	cl->sleeping_ = 0;
	cl->undertime_.tv_sec = 0;
#endif
	ifd->borrowed_[ifd->qi_] = cl->borrow_;
	ifd->cutoff_ = cl->borrow_->depth_;

	/*
	 * Dequeue the packet and do the bookkeeping...
	 */
 _wrr_out:
	if (op == ALTDQ_REMOVE) {
		m = _rmc_getq(cl);
		if (m == NULL)
			panic("_rmc_wrr_dequeue_next");
		if (qempty(cl->q_))
			ifd->na_[cpri]--;

		/*
		 * Update class statistics and link data.
		 */
		if (cl->bytes_alloc_ > 0)
			cl->bytes_alloc_ -= m_pktlen(m);

		if ((cl->bytes_alloc_ <= 0) || first == cl)
			ifd->active_[cl->pri_] = cl->peer_;
		else
			ifd->active_[cl->pri_] = cl;

		ifd->class_[ifd->qi_] = cl;
		ifd->curlen_[ifd->qi_] = m_pktlen(m);
		ifd->now_[ifd->qi_] = now;
		ifd->qi_ = (ifd->qi_ + 1) % ifd->maxqueued_;
		ifd->queued_++;
	} else {
		/* mode == ALTDQ_POLL */
		m = _rmc_pollq(cl);
		ifd->pollcache_ = cl;
	}
	return (m);
}

/*
 * Dequeue & return next packet from the highest priority class that
 * has a packet to send & has enough allocation to send it.  This
 * routine is called by a driver whenever it needs a new packet to
 * output.
 */
static mbuf_t *
_rmc_prr_dequeue_next(struct rm_ifdat *ifd, int op)
{
	mbuf_t		*m;
	int		 cpri;
	struct rm_class	*cl, *first = NULL;
	struct timespec	 now;

	RM_GETTIME(now);

	/*
	 * if the driver polls the top of the queue and then removes
	 * the polled packet, we must return the same packet.
	 */
	if (op == ALTDQ_REMOVE && ifd->pollcache_) {
		cl = ifd->pollcache_;
		cpri = cl->pri_;
		ifd->pollcache_ = NULL;
		goto _prr_out;
	} else {
		/* mode == ALTDQ_POLL || pollcache == NULL */
		ifd->pollcache_ = NULL;
		ifd->borrowed_[ifd->qi_] = NULL;
	}
#ifdef ADJUST_CUTOFF
 _again:
#endif
	for (cpri = RM_MAXPRIO - 1; cpri >= 0; cpri--) {
		if (ifd->na_[cpri] == 0)
			continue;
		cl = ifd->active_[cpri];
		ASSERT(cl != NULL);
		do {
			if (!qempty(cl->q_)) {
				if ((cl->undertime_.tv_sec == 0) ||
				    rmc_under_limit(cl, &now))
					goto _prr_out;
				if (first == NULL && cl->borrow_ != NULL)
					first = cl;
			}
			cl = cl->peer_;
		} while (cl != ifd->active_[cpri]);
	}

#ifdef ADJUST_CUTOFF
	/*
	 * no underlimit class found.  if cutoff is taking effect, increase
	 * cutoff and try again.
	 */
	if (first != NULL && ifd->cutoff_ < ifd->root_->depth_) {
		ifd->cutoff_++;
		goto _again;
	}
#endif /* ADJUST_CUTOFF */
	/*
	 * If LINK_EFFICIENCY is turned on, then the first overlimit
	 * class we encounter will send a packet if all the classes
	 * of the link-sharing structure are overlimit.
	 */
	reset_cutoff(ifd);
	if (!ifd->efficient_ || first == NULL)
		return (NULL);

	cl = first;
	cpri = cl->pri_;
#if 0	/* too time-consuming for nothing */
	if (cl->sleeping_)
		CALLOUT_STOP(&cl->callout_);
	cl->sleeping_ = 0;
	cl->undertime_.tv_sec = 0;
#endif
	ifd->borrowed_[ifd->qi_] = cl->borrow_;
	ifd->cutoff_ = cl->borrow_->depth_;

	/*
	 * Dequeue the packet and do the bookkeeping...
	 */
 _prr_out:
	if (op == ALTDQ_REMOVE) {
		m = _rmc_getq(cl);
		if (m == NULL)
			panic("_rmc_prr_dequeue_next");
		if (qempty(cl->q_))
			ifd->na_[cpri]--;

		ifd->active_[cpri] = cl->peer_;

		ifd->class_[ifd->qi_] = cl;
		ifd->curlen_[ifd->qi_] = m_pktlen(m);
		ifd->now_[ifd->qi_] = now;
		ifd->qi_ = (ifd->qi_ + 1) % ifd->maxqueued_;
		ifd->queued_++;
	} else {
		/* mode == ALTDQ_POLL */
		m = _rmc_pollq(cl);
		ifd->pollcache_ = cl;
	}
	return (m);
}

/*
 * mbuf_t *
 * rmc_dequeue_next(struct rm_ifdat *ifd, struct timespec *now) - this function
 *	is invoked by the packet driver to get the next packet to be
 *	dequeued and output on the link.  If WRR is enabled, then the
 *	WRR dequeue next routine will determine the next packet to be sent.
 *	Otherwise, packet-by-packet round robin is invoked.
 *
 *	Returns:	NULL, if a packet is not available or if all
 *			classes are overlimit.
 *
 *			Otherwise, Pointer to the next packet.
 */

mbuf_t *
rmc_dequeue_next(struct rm_ifdat *ifd, int mode)
{
	if (ifd->queued_ >= ifd->maxqueued_)
		return (NULL);
	else if (ifd->wrr_)
		return (_rmc_wrr_dequeue_next(ifd, mode));
	else
		return (_rmc_prr_dequeue_next(ifd, mode));
}

/*
 * Update the utilization estimate for the packet that just completed.
 * The packet's class & the parent(s) of that class all get their
 * estimators updated.  This routine is called by the driver's output-
 * packet-completion interrupt service routine.
 */

/*
 * a macro to approximate "divide by 1000" that gives 0.000999,
 * if a value has enough effective digits.
 * (on pentium, mul takes 9 cycles but div takes 46!)
 */
#define	NSEC_TO_USEC(t)	(((t) >> 10) + ((t) >> 16) + ((t) >> 17))
/* Don't worry.  Recent compilers don't use div. */
#define	PSEC_TO_USEC(t)	((t) / 1000 / 1000)
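
/*
 * Illustrative sketch (not compiled): a quick check of the NSEC_TO_USEC
 * approximation above.  2^-10 + 2^-16 + 2^-17 = 0.00099945..., so the
 * macro stays within about 0.06% of a true divide by 1000 once the
 * argument has enough significant bits.  The sample value below is an
 * arbitrary assumption for the example.
 */
#if 0
static void
nsec_to_usec_check(void)
{
	int64_t t = 1000000;	/* 1 msec expressed in nsec */

	/* shifts give 976 + 15 + 7 = 998; exact divide gives 1000 */
	printf("approx=%lld exact=%lld\n",
	    (long long)NSEC_TO_USEC(t), (long long)(t / 1000));
}
#endif
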
void
rmc_update_class_util(struct rm_ifdat *ifd)
{
	int64_t		 idle, avgidle, pktlen;
	int64_t		 pkt_time;
	int64_t		 tidle;
	rm_class_t	*cl, *cl0, *borrowed;
	rm_class_t	*borrows;
	struct timespec	*nowp;

	/*
	 * Get the most recent completed class.
	 */
	if ((cl = ifd->class_[ifd->qo_]) == NULL)
		return;

	cl0 = cl;
	pktlen = (int64_t)ifd->curlen_[ifd->qo_];
	borrowed = ifd->borrowed_[ifd->qo_];
	borrows = borrowed;

	PKTCNTR_ADD(&cl->stats_.xmit_cnt, pktlen);

	/*
	 * Run estimator on class and its ancestors.
	 */
	/*
	 * rm_update_class_util is designed to be called when the
	 * transfer is completed from a xmit complete interrupt,
	 * but most drivers don't implement an upcall for that.
	 * so, just use estimated completion time.
	 * as a result, ifd->qi_ and ifd->qo_ are always synced.
	 */
	nowp = &ifd->now_[ifd->qo_];
	/* get pkt_time (for link) in nsec */
#if 1  /* use approximation */
	pkt_time = (int64_t)ifd->curlen_[ifd->qo_] * (int64_t)ifd->ps_per_byte_;
	pkt_time = PSEC_TO_NSEC(pkt_time);
#else
	pkt_time = ifd->curlen_[ifd->qo_] * ifd->ns_per_byte_ / 1000;
#endif
	if (ifd->ifq_->altq_ifp->if_type == IFT_PPP) {
		if (TS_LT(nowp, &ifd->ifnow_)) {
			int iftime;

			/*
			 * make sure the estimated completion time does not go
			 * too far.  it can happen when the link layer supports
			 * data compression or the interface speed is set to
			 * a much lower value.
			 */
			TS_DELTA(&ifd->ifnow_, nowp, iftime);
			if (iftime+pkt_time < ifd->maxiftime_) {
				TS_ADD_DELTA(&ifd->ifnow_, pkt_time, &ifd->ifnow_);
			} else {
				TS_ADD_DELTA(nowp, ifd->maxiftime_, &ifd->ifnow_);
			}
		} else {
			TS_ADD_DELTA(nowp, pkt_time, &ifd->ifnow_);
		}
	} else {
		if (TS_LT(nowp, &ifd->ifnow_)) {
			TS_ADD_DELTA(&ifd->ifnow_, pkt_time, &ifd->ifnow_);
		} else {
			TS_ADD_DELTA(nowp, pkt_time, &ifd->ifnow_);
		}
	}

	while (cl != NULL) {
		TS_DELTA(&ifd->ifnow_, &cl->last_, idle);
		if (idle >= 2000000000)
			/*
			 * this class is idle enough, reset avgidle.
			 * (TS_DELTA returns 2000000000 ns when delta is large.)
			 */
			cl->avgidle_ = cl->maxidle_;

		/* get pkt_time (for class) in nsec */
#if 1  /* use approximation */
		pkt_time = pktlen * (int64_t)cl->ps_per_byte_;
		pkt_time = PSEC_TO_NSEC(pkt_time);
#else
		pkt_time = pktlen * cl->ns_per_byte_ / 1000;
#endif
		idle -= pkt_time;

		avgidle = cl->avgidle_;
		avgidle += idle - (avgidle >> RM_FILTER_GAIN);
		cl->avgidle_ = avgidle;

		/* Are we overlimit ? */
		if (avgidle <= 0) {
			CBQTRACE(rmc_update_class_util, 'milo', cl->stats_.handle);
#if 1 /* ALTQ */
			/*
			 * need some lower bound for avgidle, otherwise
			 * a borrowing class gets unbounded penalty.
			 */
			if (avgidle < cl->minidle_)
				avgidle = cl->avgidle_ = cl->minidle_;
#endif
			/* set next idle to make avgidle 0 */
			tidle = pkt_time +
				(((1 - RM_POWER) * avgidle) >> RM_FILTER_GAIN);
			TS_ADD_DELTA(nowp, tidle, &cl->undertime_);
			++cl->stats_.over;
		} else {
			cl->avgidle_ =
			    (avgidle > cl->maxidle_) ? cl->maxidle_ : avgidle;
			cl->undertime_.tv_sec = 0;
			if (cl->sleeping_) {
				CALLOUT_STOP(&cl->callout_);
				cl->sleeping_ = 0;
			}
		}

		if (borrows != NULL) {
			if (borrows != cl)
				++cl->stats_.borrows;
			else
				borrows = NULL;
		}
		cl->last_ = ifd->ifnow_;
		cl->last_pkttime_ = pkt_time;

#if 1
		if (cl->parent_ == NULL && cl != cl0) {
			/* take stats of root class */
			PKTCNTR_ADD(&cl->stats_.xmit_cnt, pktlen);
		}
#endif

		cl = cl->parent_;
	}

	/*
	 * Check to see if cutoff needs to be set to a new level.
	 */
	cl = ifd->class_[ifd->qo_];
	if (borrowed && (ifd->cutoff_ >= borrowed->depth_)) {
#if 1 /* ALTQ */
		if ((qlen(cl->q_) <= 0) || TS_LT(nowp, &borrowed->undertime_)) {
			rmc_tl_satisfied(ifd, nowp);
			CBQTRACE(rmc_update_class_util, 'broe', ifd->cutoff_);
		} else {
			ifd->cutoff_ = borrowed->depth_;
			CBQTRACE(rmc_update_class_util, 'ffob', borrowed->depth_);
		}
#else /* !ALTQ */
		if ((qlen(cl->q_) <= 1) || TS_LT(&now, &borrowed->undertime_)) {
			reset_cutoff(ifd);
#ifdef notdef
			rmc_tl_satisfied(ifd, &now);
#endif
			CBQTRACE(rmc_update_class_util, 'broe', ifd->cutoff_);
		} else {
			ifd->cutoff_ = borrowed->depth_;
			CBQTRACE(rmc_update_class_util, 'ffob', borrowed->depth_);
		}
#endif /* !ALTQ */
	}

	/*
	 * Release class slot
	 */
	ifd->borrowed_[ifd->qo_] = NULL;
	ifd->class_[ifd->qo_] = NULL;
	ifd->qo_ = (ifd->qo_ + 1) % ifd->maxqueued_;
	ifd->queued_--;
}
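
/*
 * Illustrative sketch (not compiled) of the avgidle filter used above.
 * The stored estimate is kept scaled up by 2^RM_FILTER_GAIN, so the
 * update "avgidle += idle - (avgidle >> RM_FILTER_GAIN)" is the EWMA
 * real' = g * real + (1 - g) * idle with gain g = 1 - 2^-RM_FILTER_GAIN,
 * matching the filter description at the top of this file.
 */
#if 0
static int64_t
avgidle_update(int64_t avgidle, int64_t idle)
{
	/* same update step as rmc_update_class_util() */
	return (avgidle + idle - (avgidle >> RM_FILTER_GAIN));
}
#endif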

/*
 * void
 * rmc_drop_action(struct rm_class *cl) - Generic (not protocol-specific)
 *	over-limit action routines.  These get invoked by rmc_under_limit()
 *	if a class with packets to send is over its bandwidth limit & can't
 *	borrow from a parent class.
 *
 *	Returns: NONE
 */

static void
rmc_drop_action(struct rm_class *cl)
{
	struct rm_ifdat	*ifd = cl->ifdat_;

	ASSERT(qlen(cl->q_) > 0);
	_rmc_dropq(cl);
	if (qempty(cl->q_))
		ifd->na_[cl->pri_]--;
}

void
rmc_dropall(struct rm_class *cl)
{
	struct rm_ifdat	*ifd = cl->ifdat_;

	if (!qempty(cl->q_)) {
		_flushq(cl->q_);

		ifd->na_[cl->pri_]--;
	}
}

#if (__FreeBSD_version > 300000)
static int tvhzto(struct timeval *);

static int
tvhzto(struct timeval *tv)
{
	struct timeval t2;

	getmicrotime(&t2);
	t2.tv_sec = tv->tv_sec - t2.tv_sec;
	t2.tv_usec = tv->tv_usec - t2.tv_usec;
	return (tvtohz(&t2));
}
#endif /* __FreeBSD_version > 300000 */

/*
 * void
 * rmc_delay_action(struct rm_class *cl) - This function is the generic CBQ
 *	delay action routine.  It is invoked via rmc_under_limit when the
 *	packet is discovered to be overlimit.
 *
 *	If the delay action is the result of the borrow class being overlimit,
 *	then delay for the offtime of the borrowing class that is overlimit.
 *
 *	Returns: NONE
 */

void
rmc_delay_action(struct rm_class *cl, struct rm_class *borrow)
{
	int	t;
	int64_t	ndelay, extradelay;

	cl->stats_.overactions++;
	if (borrow != NULL)
		TS_DELTA(&borrow->undertime_, &cl->overtime_, ndelay);
	else
		TS_DELTA(&cl->undertime_, &cl->overtime_, ndelay);
#ifndef BORROW_OFFTIME
	ndelay += cl->offtime_;
#endif

	if (!cl->sleeping_) {
		CBQTRACE(rmc_delay_action, 'yled', cl->stats_.handle);
#ifdef BORROW_OFFTIME
		if (borrow != NULL)
			extradelay = borrow->offtime_;
		else
#endif
			extradelay = cl->offtime_;

#ifdef ALTQ
		/*
		 * XXX recalculate suspend time:
		 * current undertime is (tidle + pkt_time) calculated
		 * from the last transmission.
		 *	tidle: time required to bring avgidle back to 0
		 *	pkt_time: target waiting time for this class
		 * we need to replace pkt_time by offtime
		 */
		extradelay -= cl->last_pkttime_;
#endif
		if (extradelay > 0) {
			TS_ADD_DELTA(&cl->undertime_, extradelay, &cl->undertime_);
			ndelay += extradelay;
		}

		cl->sleeping_ = 1;
		cl->stats_.delays++;

		/*
		 * Since packets are phased randomly with respect to the
		 * clock, 1 tick (the next clock tick) can be an arbitrarily
		 * short time so we have to wait for at least two ticks.
		 * NOTE:  If there's no other traffic, we need the timer as
		 * a 'backstop' to restart this class.
		 */
		if (NSEC_TO_USEC(ndelay) > tick * 2) {
#ifdef __FreeBSD__
			/* FreeBSD rounds up the tick */
			t = tvhzto(&cl->undertime_);
#else
			/* other BSDs round down the tick */
			t = tshzto(&cl->undertime_) + 1;
#endif
		} else
			t = 2;
		CALLOUT_RESET(&cl->callout_, t,
			      (timeout_t *)rmc_restart, (void *)cl);
	}
}

/*
 * void
 * rmc_restart() - is just a helper routine for rmc_delay_action -- it is
 *	called by the system timer code & is responsible for checking if the
 *	class is still sleeping (it might have been restarted as a side
 *	effect of the queue scan on a packet arrival) and, if so, restarting
 *	output for the class.  Inspecting the class state & restarting output
 *	require locking the class structure.  In general the driver is
 *	responsible for locking but this is the only routine that is not
 *	called directly or indirectly from the interface driver so it has
 *	to know about system locking conventions.  Under bsd, locking is done
 *	by raising IPL to splnet so that's what's implemented here.  On a
 *	different system this would probably need to be changed.
 *
 *	Returns:	NONE
 */

static void
rmc_restart(struct rm_class *cl)
{
	struct rm_ifdat	*ifd = cl->ifdat_;
	int		 s;

	s = splnet();
	if (cl->sleeping_) {
		cl->sleeping_ = 0;
		cl->undertime_.tv_sec = 0;

		if (ifd->queued_ < ifd->maxqueued_ && ifd->restart != NULL) {
			CBQTRACE(rmc_restart, 'trts', cl->stats_.handle);
			(ifd->restart)(ifd->ifq_);
		}
	}
	splx(s);
}

/*
 * void
 * rmc_root_overlimit(struct rm_class *cl) - This is the generic overlimit
 *	handling routine for the root class of the link sharing structure.
 *
 *	Returns: NONE
 */

static void
rmc_root_overlimit(struct rm_class *cl,
    struct rm_class *borrow)
{
	panic("rmc_root_overlimit");
}

/*
 * Packet Queue handling routines.  Eventually, this is to localize the
 *	effects on the code of whether queues are red queues or droptail
 *	queues.
 */

static int
_rmc_addq(rm_class_t *cl, mbuf_t *m)
{
#ifdef ALTQ_RIO
	if (q_is_rio(cl->q_))
		return rio_addq((rio_t *)cl->red_, cl->q_, m, cl->pktattr_);
#endif
#ifdef ALTQ_RED
	if (q_is_red(cl->q_))
		return red_addq(cl->red_, cl->q_, m, cl->pktattr_);
#endif /* ALTQ_RED */

	if (cl->flags_ & RMCF_CLEARDSCP)
		write_dsfield(m, cl->pktattr_, 0);

	_addq(cl->q_, m);
	return (0);
}

/* note: _rmc_dropq is not called for red */
static void
_rmc_dropq(rm_class_t *cl)
{
	mbuf_t	*m;

	if ((m = _getq(cl->q_)) != NULL)
		m_freem(m);
}

static mbuf_t *
_rmc_getq(rm_class_t *cl)
{
#ifdef ALTQ_RIO
	if (q_is_rio(cl->q_))
		return rio_getq((rio_t *)cl->red_, cl->q_);
#endif
#ifdef ALTQ_RED
	if (q_is_red(cl->q_))
		return red_getq(cl->red_, cl->q_);
#endif
	return _getq(cl->q_);
}

static mbuf_t *
_rmc_pollq(rm_class_t *cl)
{
	return qhead(cl->q_);
}

#ifdef CBQ_TRACE

struct cbqtrace		 cbqtrace_buffer[NCBQTRACE+1];
struct cbqtrace		*cbqtrace_ptr = NULL;
int			 cbqtrace_count;

/*
 * DDB hook to trace cbq events:
 *  the last 1024 events are held in a circular buffer.
 *  use "call cbqtrace_dump(N)" to display 20 events from Nth event.
 */
void cbqtrace_dump(int);
static char *rmc_funcname(void *);

static struct rmc_funcs {
	void	*func;
	char	*name;
} rmc_funcs[] =
{
	{ rmc_init,		"rmc_init" },
	{ rmc_queue_packet,	"rmc_queue_packet" },
	{ rmc_under_limit,	"rmc_under_limit" },
	{ rmc_update_class_util, "rmc_update_class_util" },
	{ rmc_delay_action,	"rmc_delay_action" },
	{ rmc_restart,		"rmc_restart" },
	{ _rmc_wrr_dequeue_next, "_rmc_wrr_dequeue_next" },
	{ NULL,			NULL }
};

static char *
rmc_funcname(void *func)
{
	struct rmc_funcs *fp;

	for (fp = rmc_funcs; fp->func != NULL; fp++)
		if (fp->func == func)
			return (fp->name);
	return ("unknown");
}

void
cbqtrace_dump(int counter)
{
	int	 i, *p;
	char	*cp;

	counter = counter % NCBQTRACE;
	p = (int *)&cbqtrace_buffer[counter];

	for (i=0; i<20; i++) {
		printf("[0x%x] ", *p++);
		printf("%s: ", rmc_funcname((void *)*p++));
		cp = (char *)p++;
		printf("%c%c%c%c: ", cp[0], cp[1], cp[2], cp[3]);
		printf("%d\n",*p++);

		if (p >= (int *)&cbqtrace_buffer[NCBQTRACE])
			p = (int *)cbqtrace_buffer;
	}
}
#endif /* CBQ_TRACE */
#endif /* ALTQ_CBQ */

#if defined(ALTQ_CBQ) || defined(ALTQ_RED) || defined(ALTQ_RIO) || defined(ALTQ_HFSC) || defined(ALTQ_PRIQ)
#if !defined(__GNUC__) || defined(ALTQ_DEBUG)

void
_addq(class_queue_t *q, mbuf_t *m)
{
	mbuf_t	*m0;

	if ((m0 = qtail(q)) != NULL)
		m->m_nextpkt = m0->m_nextpkt;
	else
		m0 = m;
	m0->m_nextpkt = m;
	qtail(q) = m;
	qlen(q)++;
}

mbuf_t *
_getq(class_queue_t *q)
{
	mbuf_t	*m, *m0;

	if ((m = qtail(q)) == NULL)
		return (NULL);
	if ((m0 = m->m_nextpkt) != m)
		m->m_nextpkt = m0->m_nextpkt;
	else {
		ASSERT(qlen(q) == 1);
		qtail(q) = NULL;
	}
	qlen(q)--;
	m0->m_nextpkt = NULL;
	return (m0);
}
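
/*
 * Illustrative sketch (not compiled): the class queue above is a
 * circular singly-linked list of mbufs in which qtail(q) points at the
 * last packet and the last packet's m_nextpkt points back at the head,
 * making both tail-enqueue and head-dequeue O(1).  Tracing two mbufs
 * (m1 and m2 are placeholders) through _addq()/_getq():
 */
#if 0
static void
classq_example(class_queue_t *q, mbuf_t *m1, mbuf_t *m2)
{
	_addq(q, m1);	/* qtail = m1, m1->m_nextpkt = m1 (self-loop) */
	_addq(q, m2);	/* qtail = m2, m2->m_nextpkt = m1 (head) */
	(void)_getq(q);	/* returns m1, the head */
	(void)_getq(q);	/* returns m2; queue is now empty */
}
#endif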

/* drop a packet at the tail of the queue */
mbuf_t *
_getq_tail(class_queue_t *q)
{
	mbuf_t	*m, *m0, *prev;

	if ((m = m0 = qtail(q)) == NULL)
		return NULL;
	do {
		prev = m0;
		m0 = m0->m_nextpkt;
	} while (m0 != m);
	prev->m_nextpkt = m->m_nextpkt;
	if (prev == m) {
		ASSERT(qlen(q) == 1);
		qtail(q) = NULL;
	} else
		qtail(q) = prev;
	qlen(q)--;
	m->m_nextpkt = NULL;
	return (m);
}

/* randomly select a packet in the queue */
mbuf_t *
_getq_random(class_queue_t *q)
{
	struct mbuf	*m;
	int		 i, n;

	if ((m = qtail(q)) == NULL)
		return NULL;
	if (m->m_nextpkt == m) {
		ASSERT(qlen(q) == 1);
		qtail(q) = NULL;
	} else {
		struct mbuf *prev = NULL;

		n = cprng_fast32() % qlen(q) + 1;
		for (i = 0; i < n; i++) {
			prev = m;
			m = m->m_nextpkt;
		}
		prev->m_nextpkt = m->m_nextpkt;
		if (m == qtail(q))
			qtail(q) = prev;
	}
	qlen(q)--;
	m->m_nextpkt = NULL;
	return (m);
}

void
_removeq(class_queue_t *q, mbuf_t *m)
{
	mbuf_t	*m0, *prev;

	m0 = qtail(q);
	do {
		prev = m0;
		m0 = m0->m_nextpkt;
	} while (m0 != m);
	prev->m_nextpkt = m->m_nextpkt;
	if (prev == m)
		qtail(q) = NULL;
	else if (qtail(q) == m)
		qtail(q) = prev;
	qlen(q)--;
}

void
_flushq(class_queue_t *q)
{
	mbuf_t *m;

	while ((m = _getq(q)) != NULL)
		m_freem(m);
	ASSERT(qlen(q) == 0);
}

#endif /* !__GNUC__ || ALTQ_DEBUG */
#endif /* ALTQ_CBQ || ALTQ_RED || ALTQ_RIO || ALTQ_HFSC || ALTQ_PRIQ */
