/*	$FreeBSD$	*/
/*	$KAME: altq_hfsc.c,v 1.24 2003/12/05 05:40:46 kjc Exp $	*/

/*
 * Copyright (c) 1997-1999 Carnegie Mellon University. All Rights Reserved.
 *
 * Permission to use, copy, modify, and distribute this software and
 * its documentation is hereby granted (including for commercial or
 * for-profit use), provided that both the copyright notice and this
 * permission notice appear in all copies of the software, derivative
 * works, or modified versions, and any portions thereof.
 *
 * THIS SOFTWARE IS EXPERIMENTAL AND IS KNOWN TO HAVE BUGS, SOME OF
 * WHICH MAY HAVE SERIOUS CONSEQUENCES.  CARNEGIE MELLON PROVIDES THIS
 * SOFTWARE IN ITS ``AS IS'' CONDITION, AND ANY EXPRESS OR IMPLIED
 * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
 * DISCLAIMED.  IN NO EVENT SHALL CARNEGIE MELLON UNIVERSITY BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT
 * OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
 * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
 * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
 * USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH
 * DAMAGE.
 *
 * Carnegie Mellon encourages (but does not require) users of this
 * software to return any improvements or extensions that they make,
 * and to grant Carnegie Mellon the rights to redistribute these
 * changes without encumbrance.
 */
/*
 * H-FSC is described in Proceedings of SIGCOMM'97,
 * "A Hierarchical Fair Service Curve Algorithm for Link-Sharing,
 * Real-Time and Priority Service"
 * by Ion Stoica, Hui Zhang, and T. S. Eugene Ng.
 *
 * Oleg Cherevko <olwi@aq.ml.com.ua> added the upperlimit for link-sharing.
 * when a class has an upperlimit, the fit-time is computed from the
 * upperlimit service curve.  the link-sharing scheduler does not schedule
 * a class whose fit-time exceeds the current time.
 */
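/*
 * each class may carry up to three service curves, matching the three
 * criteria above: a real-time service curve (cl_rsc) from which eligible
 * and deadline times are derived, a link-sharing service curve (cl_fsc)
 * from which virtual times are derived, and an optional upper-limit
 * service curve (cl_usc) from which the fit-time is derived.
 */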

#if defined(__FreeBSD__) || defined(__NetBSD__)
#include "opt_altq.h"
#include "opt_inet.h"
#ifdef __FreeBSD__
#include "opt_inet6.h"
#endif
#endif /* __FreeBSD__ || __NetBSD__ */

#ifdef ALTQ_HFSC  /* hfsc is enabled by ALTQ_HFSC option in opt_altq.h */

#include <sys/param.h>
#include <sys/malloc.h>
#include <sys/mbuf.h>
#include <sys/socket.h>
#include <sys/systm.h>
#include <sys/errno.h>
#include <sys/queue.h>
#if 1 /* ALTQ3_COMPAT */
#include <sys/sockio.h>
#include <sys/proc.h>
#include <sys/kernel.h>
#endif /* ALTQ3_COMPAT */

#include <net/if.h>
#include <netinet/in.h>

#include <net/pfvar.h>
#include <altq/altq.h>
#include <altq/altq_hfsc.h>
#ifdef ALTQ3_COMPAT
#include <altq/altq_conf.h>
#endif

/*
 * function prototypes
 */
static int			 hfsc_clear_interface(struct hfsc_if *);
static int			 hfsc_request(struct ifaltq *, int, void *);
static void			 hfsc_purge(struct hfsc_if *);
static struct hfsc_class	*hfsc_class_create(struct hfsc_if *,
    struct service_curve *, struct service_curve *, struct service_curve *,
    struct hfsc_class *, int, int, int);
static int			 hfsc_class_destroy(struct hfsc_class *);
static struct hfsc_class	*hfsc_nextclass(struct hfsc_class *);
static int			 hfsc_enqueue(struct ifaltq *, struct mbuf *,
				    struct altq_pktattr *);
static struct mbuf		*hfsc_dequeue(struct ifaltq *, int);

static int		 hfsc_addq(struct hfsc_class *, struct mbuf *);
static struct mbuf	*hfsc_getq(struct hfsc_class *);
static struct mbuf	*hfsc_pollq(struct hfsc_class *);
static void		 hfsc_purgeq(struct hfsc_class *);

static void		 update_cfmin(struct hfsc_class *);
static void		 set_active(struct hfsc_class *, int);
static void		 set_passive(struct hfsc_class *);

static void		 init_ed(struct hfsc_class *, int);
static void		 update_ed(struct hfsc_class *, int);
static void		 update_d(struct hfsc_class *, int);
static void		 init_vf(struct hfsc_class *, int);
static void		 update_vf(struct hfsc_class *, int, u_int64_t);
static void		 ellist_insert(struct hfsc_class *);
static void		 ellist_remove(struct hfsc_class *);
static void		 ellist_update(struct hfsc_class *);
struct hfsc_class	*hfsc_get_mindl(struct hfsc_if *, u_int64_t);
static void		 actlist_insert(struct hfsc_class *);
static void		 actlist_remove(struct hfsc_class *);
static void		 actlist_update(struct hfsc_class *);

static struct hfsc_class	*actlist_firstfit(struct hfsc_class *,
				    u_int64_t);

static __inline u_int64_t	seg_x2y(u_int64_t, u_int64_t);
static __inline u_int64_t	seg_y2x(u_int64_t, u_int64_t);
static __inline u_int64_t	m2sm(u_int);
static __inline u_int64_t	m2ism(u_int);
static __inline u_int64_t	d2dx(u_int);
static u_int			sm2m(u_int64_t);
static u_int			dx2d(u_int64_t);

static void		sc2isc(struct service_curve *, struct internal_sc *);
static void		rtsc_init(struct runtime_sc *, struct internal_sc *,
			    u_int64_t, u_int64_t);
static u_int64_t	rtsc_y2x(struct runtime_sc *, u_int64_t);
static u_int64_t	rtsc_x2y(struct runtime_sc *, u_int64_t);
static void		rtsc_min(struct runtime_sc *, struct internal_sc *,
			    u_int64_t, u_int64_t);

static void			 get_class_stats(struct hfsc_classstats *,
				    struct hfsc_class *);
static struct hfsc_class	*clh_to_clp(struct hfsc_if *, u_int32_t);


#ifdef ALTQ3_COMPAT
static struct hfsc_if *hfsc_attach(struct ifaltq *, u_int);
static int hfsc_detach(struct hfsc_if *);
static int hfsc_class_modify(struct hfsc_class *, struct service_curve *,
    struct service_curve *, struct service_curve *);

static int hfsccmd_if_attach(struct hfsc_attach *);
static int hfsccmd_if_detach(struct hfsc_interface *);
static int hfsccmd_add_class(struct hfsc_add_class *);
static int hfsccmd_delete_class(struct hfsc_delete_class *);
static int hfsccmd_modify_class(struct hfsc_modify_class *);
static int hfsccmd_add_filter(struct hfsc_add_filter *);
static int hfsccmd_delete_filter(struct hfsc_delete_filter *);
static int hfsccmd_class_stats(struct hfsc_class_stats *);

altqdev_decl(hfsc);
#endif /* ALTQ3_COMPAT */

/*
 * macros
 */
#define	is_a_parent_class(cl)	((cl)->cl_children != NULL)

#define	HT_INFINITY	0xffffffffffffffffLL	/* infinite time value */

#ifdef ALTQ3_COMPAT
/* hif_list keeps all hfsc_if's allocated. */
static struct hfsc_if *hif_list = NULL;
#endif /* ALTQ3_COMPAT */

int
hfsc_pfattach(struct pf_altq *a)
{
	struct ifnet *ifp;
	int s, error;

	if ((ifp = ifunit(a->ifname)) == NULL || a->altq_disc == NULL)
		return (EINVAL);
#ifdef __NetBSD__
	s = splnet();
#else
	s = splimp();
#endif
	error = altq_attach(&ifp->if_snd, ALTQT_HFSC, a->altq_disc,
	    hfsc_enqueue, hfsc_dequeue, hfsc_request, NULL, NULL);
	splx(s);
	return (error);
}

int
hfsc_add_altq(struct pf_altq *a)
{
	struct hfsc_if *hif;
	struct ifnet *ifp;

	if ((ifp = ifunit(a->ifname)) == NULL)
		return (EINVAL);
	if (!ALTQ_IS_READY(&ifp->if_snd))
		return (ENODEV);

	hif = malloc(sizeof(struct hfsc_if), M_DEVBUF, M_NOWAIT | M_ZERO);
	if (hif == NULL)
		return (ENOMEM);

	TAILQ_INIT(&hif->hif_eligible);
	hif->hif_ifq = &ifp->if_snd;

	/* keep the state in pf_altq */
	a->altq_disc = hif;

	return (0);
}

int
hfsc_remove_altq(struct pf_altq *a)
{
	struct hfsc_if *hif;

	if ((hif = a->altq_disc) == NULL)
		return (EINVAL);
	a->altq_disc = NULL;

	(void)hfsc_clear_interface(hif);
	(void)hfsc_class_destroy(hif->hif_rootclass);

	free(hif, M_DEVBUF);

	return (0);
}

int
hfsc_add_queue(struct pf_altq *a)
{
	struct hfsc_if *hif;
	struct hfsc_class *cl, *parent;
	struct hfsc_opts *opts;
	struct service_curve rtsc, lssc, ulsc;

	if ((hif = a->altq_disc) == NULL)
		return (EINVAL);

	opts = &a->pq_u.hfsc_opts;

	if (a->parent_qid == HFSC_NULLCLASS_HANDLE &&
	    hif->hif_rootclass == NULL)
		parent = NULL;
	else if ((parent = clh_to_clp(hif, a->parent_qid)) == NULL)
		return (EINVAL);

	if (a->qid == 0)
		return (EINVAL);

	if (clh_to_clp(hif, a->qid) != NULL)
		return (EBUSY);

	rtsc.m1 = opts->rtsc_m1;
	rtsc.d  = opts->rtsc_d;
	rtsc.m2 = opts->rtsc_m2;
	lssc.m1 = opts->lssc_m1;
	lssc.d  = opts->lssc_d;
	lssc.m2 = opts->lssc_m2;
	ulsc.m1 = opts->ulsc_m1;
	ulsc.d  = opts->ulsc_d;
	ulsc.m2 = opts->ulsc_m2;

	cl = hfsc_class_create(hif, &rtsc, &lssc, &ulsc,
	    parent, a->qlimit, opts->flags, a->qid);
	if (cl == NULL)
		return (ENOMEM);

	return (0);
}

int
hfsc_remove_queue(struct pf_altq *a)
{
	struct hfsc_if *hif;
	struct hfsc_class *cl;

	if ((hif = a->altq_disc) == NULL)
		return (EINVAL);

	if ((cl = clh_to_clp(hif, a->qid)) == NULL)
		return (EINVAL);

	return (hfsc_class_destroy(cl));
}

int
hfsc_getqstats(struct pf_altq *a, void *ubuf, int *nbytes)
{
	struct hfsc_if *hif;
	struct hfsc_class *cl;
	struct hfsc_classstats stats;
	int error = 0;

	if ((hif = altq_lookup(a->ifname, ALTQT_HFSC)) == NULL)
		return (EBADF);

	if ((cl = clh_to_clp(hif, a->qid)) == NULL)
		return (EINVAL);

	if (*nbytes < sizeof(stats))
		return (EINVAL);

	get_class_stats(&stats, cl);

	if ((error = copyout((caddr_t)&stats, ubuf, sizeof(stats))) != 0)
		return (error);
	*nbytes = sizeof(stats);
	return (0);
}

/*
 * bring the interface back to the initial state by discarding
 * all the filters and classes except the root class.
 */
static int
hfsc_clear_interface(struct hfsc_if *hif)
{
	struct hfsc_class	*cl;

#ifdef ALTQ3_COMPAT
	/* free the filters for this interface */
	acc_discard_filters(&hif->hif_classifier, NULL, 1);
#endif

	/* clear out the classes */
	while (hif->hif_rootclass != NULL &&
	    (cl = hif->hif_rootclass->cl_children) != NULL) {
		/*
		 * remove the first leaf class found in the hierarchy
		 * then start over
		 */
		for (; cl != NULL; cl = hfsc_nextclass(cl)) {
			if (!is_a_parent_class(cl)) {
				(void)hfsc_class_destroy(cl);
				break;
			}
		}
	}

	return (0);
}

static int
hfsc_request(struct ifaltq *ifq, int req, void *arg)
{
	struct hfsc_if	*hif = (struct hfsc_if *)ifq->altq_disc;

	IFQ_LOCK_ASSERT(ifq);

	switch (req) {
	case ALTRQ_PURGE:
		hfsc_purge(hif);
		break;
	}
	return (0);
}

/* discard all the queued packets on the interface */
static void
hfsc_purge(struct hfsc_if *hif)
{
	struct hfsc_class *cl;

	for (cl = hif->hif_rootclass; cl != NULL; cl = hfsc_nextclass(cl))
		if (!qempty(cl->cl_q))
			hfsc_purgeq(cl);
	if (ALTQ_IS_ENABLED(hif->hif_ifq))
		hif->hif_ifq->ifq_len = 0;
}

struct hfsc_class *
hfsc_class_create(struct hfsc_if *hif, struct service_curve *rsc,
    struct service_curve *fsc, struct service_curve *usc,
    struct hfsc_class *parent, int qlimit, int flags, int qid)
{
	struct hfsc_class *cl, *p;
	int i, s;

	if (hif->hif_classes >= HFSC_MAX_CLASSES)
		return (NULL);

#ifndef ALTQ_RED
	if (flags & HFCF_RED) {
#ifdef ALTQ_DEBUG
		printf("hfsc_class_create: RED not configured for HFSC!\n");
#endif
		return (NULL);
	}
#endif

	cl = malloc(sizeof(struct hfsc_class), M_DEVBUF, M_NOWAIT | M_ZERO);
	if (cl == NULL)
		return (NULL);

	cl->cl_q = malloc(sizeof(class_queue_t), M_DEVBUF, M_NOWAIT | M_ZERO);
	if (cl->cl_q == NULL)
		goto err_ret;

	TAILQ_INIT(&cl->cl_actc);

	if (qlimit == 0)
		qlimit = 50;  /* use default */
	qlimit(cl->cl_q) = qlimit;
	qtype(cl->cl_q) = Q_DROPTAIL;
	qlen(cl->cl_q) = 0;
	cl->cl_flags = flags;
#ifdef ALTQ_RED
	if (flags & (HFCF_RED|HFCF_RIO)) {
		int red_flags, red_pkttime;
		u_int m2;

		m2 = 0;
		if (rsc != NULL && rsc->m2 > m2)
			m2 = rsc->m2;
		if (fsc != NULL && fsc->m2 > m2)
			m2 = fsc->m2;
		if (usc != NULL && usc->m2 > m2)
			m2 = usc->m2;

		red_flags = 0;
		if (flags & HFCF_ECN)
			red_flags |= REDF_ECN;
#ifdef ALTQ_RIO
		if (flags & HFCF_CLEARDSCP)
			red_flags |= RIOF_CLEARDSCP;
#endif
		if (m2 < 8)
			red_pkttime = 1000 * 1000 * 1000; /* 1 sec */
		else
			red_pkttime = (int64_t)hif->hif_ifq->altq_ifp->if_mtu
				* 1000 * 1000 * 1000 / (m2 / 8);
		if (flags & HFCF_RED) {
			cl->cl_red = red_alloc(0, 0,
			    qlimit(cl->cl_q) * 10/100,
			    qlimit(cl->cl_q) * 30/100,
			    red_flags, red_pkttime);
			if (cl->cl_red != NULL)
				qtype(cl->cl_q) = Q_RED;
		}
#ifdef ALTQ_RIO
		else {
			cl->cl_red = (red_t *)rio_alloc(0, NULL,
			    red_flags, red_pkttime);
			if (cl->cl_red != NULL)
				qtype(cl->cl_q) = Q_RIO;
		}
#endif
	}
#endif /* ALTQ_RED */

	if (rsc != NULL && (rsc->m1 != 0 || rsc->m2 != 0)) {
		cl->cl_rsc = malloc(sizeof(struct internal_sc),
		    M_DEVBUF, M_NOWAIT);
		if (cl->cl_rsc == NULL)
			goto err_ret;
		sc2isc(rsc, cl->cl_rsc);
		rtsc_init(&cl->cl_deadline, cl->cl_rsc, 0, 0);
		rtsc_init(&cl->cl_eligible, cl->cl_rsc, 0, 0);
	}
	if (fsc != NULL && (fsc->m1 != 0 || fsc->m2 != 0)) {
		cl->cl_fsc = malloc(sizeof(struct internal_sc),
		    M_DEVBUF, M_NOWAIT);
		if (cl->cl_fsc == NULL)
			goto err_ret;
		sc2isc(fsc, cl->cl_fsc);
		rtsc_init(&cl->cl_virtual, cl->cl_fsc, 0, 0);
	}
	if (usc != NULL && (usc->m1 != 0 || usc->m2 != 0)) {
		cl->cl_usc = malloc(sizeof(struct internal_sc),
		    M_DEVBUF, M_NOWAIT);
		if (cl->cl_usc == NULL)
			goto err_ret;
		sc2isc(usc, cl->cl_usc);
		rtsc_init(&cl->cl_ulimit, cl->cl_usc, 0, 0);
	}

	cl->cl_id = hif->hif_classid++;
	cl->cl_handle = qid;
	cl->cl_hif = hif;
	cl->cl_parent = parent;

#ifdef __NetBSD__
	s = splnet();
#else
	s = splimp();
#endif
	IFQ_LOCK(hif->hif_ifq);
	hif->hif_classes++;

	/*
	 * find a free slot in the class table.  if the slot matching
	 * the lower bits of qid is free, use this slot.  otherwise,
	 * use the first free slot.
	 */
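	/*
	 * illustrative example (assuming HFSC_MAX_CLASSES were, say, 64):
	 * qid 70 would hash to slot 70 % 64 = 6; if that slot is already
	 * taken, the linear scan below falls back to the first empty slot.
	 */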
	i = qid % HFSC_MAX_CLASSES;
	if (hif->hif_class_tbl[i] == NULL)
		hif->hif_class_tbl[i] = cl;
	else {
		for (i = 0; i < HFSC_MAX_CLASSES; i++)
			if (hif->hif_class_tbl[i] == NULL) {
				hif->hif_class_tbl[i] = cl;
				break;
			}
		if (i == HFSC_MAX_CLASSES) {
			IFQ_UNLOCK(hif->hif_ifq);
			splx(s);
			goto err_ret;
		}
	}

	if (flags & HFCF_DEFAULTCLASS)
		hif->hif_defaultclass = cl;

	if (parent == NULL) {
		/* this is root class */
		hif->hif_rootclass = cl;
	} else {
		/* add this class to the children list of the parent */
		if ((p = parent->cl_children) == NULL)
			parent->cl_children = cl;
		else {
			while (p->cl_siblings != NULL)
				p = p->cl_siblings;
			p->cl_siblings = cl;
		}
	}
	IFQ_UNLOCK(hif->hif_ifq);
	splx(s);

	return (cl);

 err_ret:
	if (cl->cl_red != NULL) {
#ifdef ALTQ_RIO
		if (q_is_rio(cl->cl_q))
			rio_destroy((rio_t *)cl->cl_red);
#endif
#ifdef ALTQ_RED
		if (q_is_red(cl->cl_q))
			red_destroy(cl->cl_red);
#endif
	}
	if (cl->cl_fsc != NULL)
		free(cl->cl_fsc, M_DEVBUF);
	if (cl->cl_rsc != NULL)
		free(cl->cl_rsc, M_DEVBUF);
	if (cl->cl_usc != NULL)
		free(cl->cl_usc, M_DEVBUF);
	if (cl->cl_q != NULL)
		free(cl->cl_q, M_DEVBUF);
	free(cl, M_DEVBUF);
	return (NULL);
}

static int
hfsc_class_destroy(struct hfsc_class *cl)
{
	int i, s;

	if (cl == NULL)
		return (0);

	if (is_a_parent_class(cl))
		return (EBUSY);

#ifdef __NetBSD__
	s = splnet();
#else
	s = splimp();
#endif
	IFQ_LOCK(cl->cl_hif->hif_ifq);

#ifdef ALTQ3_COMPAT
	/* delete filters referencing this class */
	acc_discard_filters(&cl->cl_hif->hif_classifier, cl, 0);
#endif /* ALTQ3_COMPAT */

	if (!qempty(cl->cl_q))
		hfsc_purgeq(cl);

	if (cl->cl_parent == NULL) {
		/* this is root class */
	} else {
		struct hfsc_class *p = cl->cl_parent->cl_children;

		if (p == cl)
			cl->cl_parent->cl_children = cl->cl_siblings;
		else do {
			if (p->cl_siblings == cl) {
				p->cl_siblings = cl->cl_siblings;
				break;
			}
		} while ((p = p->cl_siblings) != NULL);
		ASSERT(p != NULL);
	}

	for (i = 0; i < HFSC_MAX_CLASSES; i++)
		if (cl->cl_hif->hif_class_tbl[i] == cl) {
			cl->cl_hif->hif_class_tbl[i] = NULL;
			break;
		}

	cl->cl_hif->hif_classes--;
	IFQ_UNLOCK(cl->cl_hif->hif_ifq);
	splx(s);

	if (cl->cl_red != NULL) {
#ifdef ALTQ_RIO
		if (q_is_rio(cl->cl_q))
			rio_destroy((rio_t *)cl->cl_red);
#endif
#ifdef ALTQ_RED
		if (q_is_red(cl->cl_q))
			red_destroy(cl->cl_red);
#endif
	}

	IFQ_LOCK(cl->cl_hif->hif_ifq);
	if (cl == cl->cl_hif->hif_rootclass)
		cl->cl_hif->hif_rootclass = NULL;
	if (cl == cl->cl_hif->hif_defaultclass)
		cl->cl_hif->hif_defaultclass = NULL;
	IFQ_UNLOCK(cl->cl_hif->hif_ifq);

	if (cl->cl_usc != NULL)
		free(cl->cl_usc, M_DEVBUF);
	if (cl->cl_fsc != NULL)
		free(cl->cl_fsc, M_DEVBUF);
	if (cl->cl_rsc != NULL)
		free(cl->cl_rsc, M_DEVBUF);
	free(cl->cl_q, M_DEVBUF);
	free(cl, M_DEVBUF);

	return (0);
}

/*
 * hfsc_nextclass returns the next class in the tree.
 *   usage:
 *	for (cl = hif->hif_rootclass; cl != NULL; cl = hfsc_nextclass(cl))
 *		do_something;
 */
static struct hfsc_class *
hfsc_nextclass(struct hfsc_class *cl)
{
	if (cl->cl_children != NULL)
		cl = cl->cl_children;
	else if (cl->cl_siblings != NULL)
		cl = cl->cl_siblings;
	else {
		while ((cl = cl->cl_parent) != NULL)
			if (cl->cl_siblings) {
				cl = cl->cl_siblings;
				break;
			}
	}

	return (cl);
}

/*
 * hfsc_enqueue is an enqueue function to be registered to
 * (*altq_enqueue) in struct ifaltq.
 */
static int
hfsc_enqueue(struct ifaltq *ifq, struct mbuf *m, struct altq_pktattr *pktattr)
{
	struct hfsc_if	*hif = (struct hfsc_if *)ifq->altq_disc;
	struct hfsc_class *cl;
	struct pf_mtag *t;
	int len;

	IFQ_LOCK_ASSERT(ifq);

	/* grab class set by classifier */
	if ((m->m_flags & M_PKTHDR) == 0) {
		/* should not happen */
		printf("altq: packet for %s does not have pkthdr\n",
		    ifq->altq_ifp->if_xname);
		m_freem(m);
		return (ENOBUFS);
	}
	cl = NULL;
	if ((t = pf_find_mtag(m)) != NULL)
		cl = clh_to_clp(hif, t->qid);
#ifdef ALTQ3_COMPAT
	else if ((ifq->altq_flags & ALTQF_CLASSIFY) && pktattr != NULL)
		cl = pktattr->pattr_class;
#endif
	if (cl == NULL || is_a_parent_class(cl)) {
		cl = hif->hif_defaultclass;
		if (cl == NULL) {
			m_freem(m);
			return (ENOBUFS);
		}
	}
#ifdef ALTQ3_COMPAT
	if (pktattr != NULL)
		cl->cl_pktattr = pktattr;  /* save proto hdr used by ECN */
	else
#endif
		cl->cl_pktattr = NULL;
	len = m_pktlen(m);
	if (hfsc_addq(cl, m) != 0) {
		/* drop occurred.  mbuf was freed in hfsc_addq. */
		PKTCNTR_ADD(&cl->cl_stats.drop_cnt, len);
		return (ENOBUFS);
	}
	IFQ_INC_LEN(ifq);
	cl->cl_hif->hif_packets++;

	/* successfully queued. */
	if (qlen(cl->cl_q) == 1)
		set_active(cl, m_pktlen(m));

	return (0);
}

/*
 * hfsc_dequeue is a dequeue function to be registered to
 * (*altq_dequeue) in struct ifaltq.
 *
 * note: ALTDQ_POLL returns the next packet without removing the packet
 *	from the queue.  ALTDQ_REMOVE is a normal dequeue operation.
 *	ALTDQ_REMOVE must return the same packet if called immediately
 *	after ALTDQ_POLL.
 */
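/*
 * the poll/remove contract above is implemented with hif_pollcache:
 * an ALTDQ_POLL records the class chosen by the scheduler, and the
 * ALTDQ_REMOVE that immediately follows reuses that cached class
 * instead of re-running the scheduling decision.
 */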
static struct mbuf *
hfsc_dequeue(struct ifaltq *ifq, int op)
{
	struct hfsc_if	*hif = (struct hfsc_if *)ifq->altq_disc;
	struct hfsc_class *cl;
	struct mbuf *m;
	int len, next_len;
	int realtime = 0;
	u_int64_t cur_time;

	IFQ_LOCK_ASSERT(ifq);

	if (hif->hif_packets == 0)
		/* no packet in the tree */
		return (NULL);

	cur_time = read_machclk();

	if (op == ALTDQ_REMOVE && hif->hif_pollcache != NULL) {

		cl = hif->hif_pollcache;
		hif->hif_pollcache = NULL;
		/* check if the class was scheduled by real-time criteria */
		if (cl->cl_rsc != NULL)
			realtime = (cl->cl_e <= cur_time);
	} else {
		/*
		 * if there are eligible classes, use real-time criteria.
		 * find the class with the minimum deadline among
		 * the eligible classes.
		 */
		if ((cl = hfsc_get_mindl(hif, cur_time))
		    != NULL) {
			realtime = 1;
		} else {
#ifdef ALTQ_DEBUG
			int fits = 0;
#endif
			/*
			 * use link-sharing criteria
			 * get the class with the minimum vt in the hierarchy
			 */
			cl = hif->hif_rootclass;
			while (is_a_parent_class(cl)) {

				cl = actlist_firstfit(cl, cur_time);
				if (cl == NULL) {
#ifdef ALTQ_DEBUG
					if (fits > 0)
						printf("%d fit but none found\n",fits);
#endif
					return (NULL);
				}
				/*
				 * update parent's cl_cvtmin.
				 * don't update if the new vt is smaller.
				 */
				if (cl->cl_parent->cl_cvtmin < cl->cl_vt)
					cl->cl_parent->cl_cvtmin = cl->cl_vt;
#ifdef ALTQ_DEBUG
				fits++;
#endif
			}
		}

		if (op == ALTDQ_POLL) {
			hif->hif_pollcache = cl;
			m = hfsc_pollq(cl);
			return (m);
		}
	}

	m = hfsc_getq(cl);
	if (m == NULL)
		panic("hfsc_dequeue:");
	len = m_pktlen(m);
	cl->cl_hif->hif_packets--;
	IFQ_DEC_LEN(ifq);
	PKTCNTR_ADD(&cl->cl_stats.xmit_cnt, len);

	update_vf(cl, len, cur_time);
	if (realtime)
		cl->cl_cumul += len;

	if (!qempty(cl->cl_q)) {
		if (cl->cl_rsc != NULL) {
			/* update ed */
			next_len = m_pktlen(qhead(cl->cl_q));

			if (realtime)
				update_ed(cl, next_len);
			else
				update_d(cl, next_len);
		}
	} else {
		/* the class becomes passive */
		set_passive(cl);
	}

	return (m);
}

static int
hfsc_addq(struct hfsc_class *cl, struct mbuf *m)
{

#ifdef ALTQ_RIO
	if (q_is_rio(cl->cl_q))
		return rio_addq((rio_t *)cl->cl_red, cl->cl_q,
				m, cl->cl_pktattr);
#endif
#ifdef ALTQ_RED
	if (q_is_red(cl->cl_q))
		return red_addq(cl->cl_red, cl->cl_q, m, cl->cl_pktattr);
#endif
	if (qlen(cl->cl_q) >= qlimit(cl->cl_q)) {
		m_freem(m);
		return (-1);
	}

	if (cl->cl_flags & HFCF_CLEARDSCP)
		write_dsfield(m, cl->cl_pktattr, 0);

	_addq(cl->cl_q, m);

	return (0);
}

static struct mbuf *
hfsc_getq(struct hfsc_class *cl)
{
#ifdef ALTQ_RIO
	if (q_is_rio(cl->cl_q))
		return rio_getq((rio_t *)cl->cl_red, cl->cl_q);
#endif
#ifdef ALTQ_RED
	if (q_is_red(cl->cl_q))
		return red_getq(cl->cl_red, cl->cl_q);
#endif
	return _getq(cl->cl_q);
}

static struct mbuf *
hfsc_pollq(struct hfsc_class *cl)
{
	return qhead(cl->cl_q);
}

static void
hfsc_purgeq(struct hfsc_class *cl)
{
	struct mbuf *m;

	if (qempty(cl->cl_q))
		return;

	while ((m = _getq(cl->cl_q)) != NULL) {
		PKTCNTR_ADD(&cl->cl_stats.drop_cnt, m_pktlen(m));
		m_freem(m);
		cl->cl_hif->hif_packets--;
		IFQ_DEC_LEN(cl->cl_hif->hif_ifq);
	}
	ASSERT(qlen(cl->cl_q) == 0);

	update_vf(cl, 0, 0);	/* remove cl from the actlist */
	set_passive(cl);
}

static void
set_active(struct hfsc_class *cl, int len)
{
	if (cl->cl_rsc != NULL)
		init_ed(cl, len);
	if (cl->cl_fsc != NULL)
		init_vf(cl, len);

	cl->cl_stats.period++;
}

static void
set_passive(struct hfsc_class *cl)
{
	if (cl->cl_rsc != NULL)
		ellist_remove(cl);

	/*
	 * actlist is now handled in update_vf() so that update_vf(cl, 0, 0)
	 * needs to be called explicitly to remove a class from actlist
	 */
}

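/*
 * init_ed/update_ed compute the class's eligible time (cl_e) and deadline
 * (cl_d) from the real-time service curve; update_d recomputes only the
 * deadline, and is used when the class was picked by link-sharing criteria.
 */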
static void
init_ed(struct hfsc_class *cl, int next_len)
{
	u_int64_t cur_time;

	cur_time = read_machclk();

	/* update the deadline curve */
	rtsc_min(&cl->cl_deadline, cl->cl_rsc, cur_time, cl->cl_cumul);

	/*
	 * update the eligible curve.
	 * for concave, it is equal to the deadline curve.
	 * for convex, it is a linear curve with slope m2.
	 */
	cl->cl_eligible = cl->cl_deadline;
	if (cl->cl_rsc->sm1 <= cl->cl_rsc->sm2) {
		cl->cl_eligible.dx = 0;
		cl->cl_eligible.dy = 0;
	}

	/* compute e and d */
	cl->cl_e = rtsc_y2x(&cl->cl_eligible, cl->cl_cumul);
	cl->cl_d = rtsc_y2x(&cl->cl_deadline, cl->cl_cumul + next_len);

	ellist_insert(cl);
}

static void
update_ed(struct hfsc_class *cl, int next_len)
{
	cl->cl_e = rtsc_y2x(&cl->cl_eligible, cl->cl_cumul);
	cl->cl_d = rtsc_y2x(&cl->cl_deadline, cl->cl_cumul + next_len);

	ellist_update(cl);
}

static void
update_d(struct hfsc_class *cl, int next_len)
{
	cl->cl_d = rtsc_y2x(&cl->cl_deadline, cl->cl_cumul + next_len);
}

static void
init_vf(struct hfsc_class *cl, int len)
{
	struct hfsc_class *max_cl, *p;
	u_int64_t vt, f, cur_time;
	int go_active;

	cur_time = 0;
	go_active = 1;
	for ( ; cl->cl_parent != NULL; cl = cl->cl_parent) {

		if (go_active && cl->cl_nactive++ == 0)
			go_active = 1;
		else
			go_active = 0;

		if (go_active) {
			max_cl = TAILQ_LAST(&cl->cl_parent->cl_actc, acthead);
			if (max_cl != NULL) {
				/*
				 * set vt to the average of the min and max
				 * classes.  if the parent's period didn't
				 * change, don't decrease vt of the class.
				 */
				vt = max_cl->cl_vt;
				if (cl->cl_parent->cl_cvtmin != 0)
					vt = (cl->cl_parent->cl_cvtmin + vt)/2;

				if (cl->cl_parent->cl_vtperiod !=
				    cl->cl_parentperiod || vt > cl->cl_vt)
					cl->cl_vt = vt;
			} else {
				/*
				 * first child for a new parent backlog period.
				 * add parent's cvtmax to vtoff of children
				 * to make a new vt (vtoff + vt) larger than
				 * the vt in the last period for all children.
				 */
				vt = cl->cl_parent->cl_cvtmax;
				for (p = cl->cl_parent->cl_children; p != NULL;
				     p = p->cl_siblings)
					p->cl_vtoff += vt;
				cl->cl_vt = 0;
				cl->cl_parent->cl_cvtmax = 0;
				cl->cl_parent->cl_cvtmin = 0;
			}
			cl->cl_initvt = cl->cl_vt;

			/* update the virtual curve */
			vt = cl->cl_vt + cl->cl_vtoff;
			rtsc_min(&cl->cl_virtual, cl->cl_fsc, vt, cl->cl_total);
			if (cl->cl_virtual.x == vt) {
				cl->cl_virtual.x -= cl->cl_vtoff;
				cl->cl_vtoff = 0;
			}
			cl->cl_vtadj = 0;

			cl->cl_vtperiod++;  /* increment vt period */
			cl->cl_parentperiod = cl->cl_parent->cl_vtperiod;
			if (cl->cl_parent->cl_nactive == 0)
				cl->cl_parentperiod++;
			cl->cl_f = 0;

			actlist_insert(cl);

			if (cl->cl_usc != NULL) {
				/* class has upper limit curve */
				if (cur_time == 0)
					cur_time = read_machclk();

				/* update the ulimit curve */
				rtsc_min(&cl->cl_ulimit, cl->cl_usc, cur_time,
				    cl->cl_total);
				/* compute myf */
				cl->cl_myf = rtsc_y2x(&cl->cl_ulimit,
				    cl->cl_total);
				cl->cl_myfadj = 0;
			}
		}

		if (cl->cl_myf > cl->cl_cfmin)
			f = cl->cl_myf;
		else
			f = cl->cl_cfmin;
		if (f != cl->cl_f) {
			cl->cl_f = f;
			update_cfmin(cl->cl_parent);
		}
	}
}

static void
update_vf(struct hfsc_class *cl, int len, u_int64_t cur_time)
{
	u_int64_t f, myf_bound, delta;
	int go_passive;

	go_passive = qempty(cl->cl_q);

	for (; cl->cl_parent != NULL; cl = cl->cl_parent) {

		cl->cl_total += len;

		if (cl->cl_fsc == NULL || cl->cl_nactive == 0)
			continue;

		if (go_passive && --cl->cl_nactive == 0)
			go_passive = 1;
		else
			go_passive = 0;

		if (go_passive) {
			/* no more active child, going passive */

			/* update cvtmax of the parent class */
			if (cl->cl_vt > cl->cl_parent->cl_cvtmax)
				cl->cl_parent->cl_cvtmax = cl->cl_vt;

			/* remove this class from the vt list */
			actlist_remove(cl);

			update_cfmin(cl->cl_parent);

			continue;
		}

		/*
		 * update vt and f
		 */
		cl->cl_vt = rtsc_y2x(&cl->cl_virtual, cl->cl_total)
		    - cl->cl_vtoff + cl->cl_vtadj;

		/*
		 * if vt of the class is smaller than cvtmin,
		 * the class was skipped in the past due to non-fit.
		 * if so, we need to adjust vtadj.
		 */
		if (cl->cl_vt < cl->cl_parent->cl_cvtmin) {
			cl->cl_vtadj += cl->cl_parent->cl_cvtmin - cl->cl_vt;
			cl->cl_vt = cl->cl_parent->cl_cvtmin;
		}

		/* update the vt list */
		actlist_update(cl);

		if (cl->cl_usc != NULL) {
			cl->cl_myf = cl->cl_myfadj
			    + rtsc_y2x(&cl->cl_ulimit, cl->cl_total);

			/*
			 * if myf lags behind by more than one clock tick
			 * from the current time, adjust myfadj to prevent
			 * a rate-limited class from going greedy.
			 * in a steady state under rate-limiting, myf
			 * fluctuates within one clock tick.
			 */
			myf_bound = cur_time - machclk_per_tick;
			if (cl->cl_myf < myf_bound) {
				delta = cur_time - cl->cl_myf;
				cl->cl_myfadj += delta;
				cl->cl_myf += delta;
			}
		}

		/* cl_f is max(cl_myf, cl_cfmin) */
		if (cl->cl_myf > cl->cl_cfmin)
			f = cl->cl_myf;
		else
			f = cl->cl_cfmin;
		if (f != cl->cl_f) {
			cl->cl_f = f;
			update_cfmin(cl->cl_parent);
		}
	}
}

static void
update_cfmin(struct hfsc_class *cl)
{
	struct hfsc_class *p;
	u_int64_t cfmin;

	if (TAILQ_EMPTY(&cl->cl_actc)) {
		cl->cl_cfmin = 0;
		return;
	}
	cfmin = HT_INFINITY;
	TAILQ_FOREACH(p, &cl->cl_actc, cl_actlist) {
		if (p->cl_f == 0) {
			cl->cl_cfmin = 0;
			return;
		}
		if (p->cl_f < cfmin)
			cfmin = p->cl_f;
	}
	cl->cl_cfmin = cfmin;
}

/*
 * TAILQ based ellist and actlist implementation
 * (ion wanted to make a calendar queue based implementation)
 */
/*
 * eligible list holds backlogged classes being sorted by their eligible times.
 * there is one eligible list per interface.
 */

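/*
 * the insert below checks the list tail first: eligible times increase
 * monotonically, so a newly (re)inserted class most often belongs at the
 * end, making the common case O(1).
 */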
static void
ellist_insert(struct hfsc_class *cl)
{
	struct hfsc_if	*hif = cl->cl_hif;
	struct hfsc_class *p;

	/* check the last entry first */
	if ((p = TAILQ_LAST(&hif->hif_eligible, elighead)) == NULL ||
	    p->cl_e <= cl->cl_e) {
		TAILQ_INSERT_TAIL(&hif->hif_eligible, cl, cl_ellist);
		return;
	}

	TAILQ_FOREACH(p, &hif->hif_eligible, cl_ellist) {
		if (cl->cl_e < p->cl_e) {
			TAILQ_INSERT_BEFORE(p, cl, cl_ellist);
			return;
		}
	}
	ASSERT(0); /* should not reach here */
}

static void
ellist_remove(struct hfsc_class *cl)
{
	struct hfsc_if	*hif = cl->cl_hif;

	TAILQ_REMOVE(&hif->hif_eligible, cl, cl_ellist);
}

static void
ellist_update(struct hfsc_class *cl)
{
	struct hfsc_if	*hif = cl->cl_hif;
	struct hfsc_class *p, *last;

	/*
	 * the eligible time of a class increases monotonically.
	 * if the next entry has a larger eligible time, nothing to do.
	 */
	p = TAILQ_NEXT(cl, cl_ellist);
	if (p == NULL || cl->cl_e <= p->cl_e)
		return;

	/* check the last entry */
	last = TAILQ_LAST(&hif->hif_eligible, elighead);
	ASSERT(last != NULL);
	if (last->cl_e <= cl->cl_e) {
		TAILQ_REMOVE(&hif->hif_eligible, cl, cl_ellist);
		TAILQ_INSERT_TAIL(&hif->hif_eligible, cl, cl_ellist);
		return;
	}

	/*
	 * the new position must be between the next entry
	 * and the last entry
	 */
	while ((p = TAILQ_NEXT(p, cl_ellist)) != NULL) {
		if (cl->cl_e < p->cl_e) {
			TAILQ_REMOVE(&hif->hif_eligible, cl, cl_ellist);
			TAILQ_INSERT_BEFORE(p, cl, cl_ellist);
			return;
		}
	}
	ASSERT(0); /* should not reach here */
}

/* find the class with the minimum deadline among the eligible classes */
struct hfsc_class *
hfsc_get_mindl(struct hfsc_if *hif, u_int64_t cur_time)
{
	struct hfsc_class *p, *cl = NULL;

	TAILQ_FOREACH(p, &hif->hif_eligible, cl_ellist) {
		if (p->cl_e > cur_time)
			break;
		if (cl == NULL || p->cl_d < cl->cl_d)
			cl = p;
	}
	return (cl);
}

/*
 * active children list holds backlogged child classes being sorted
 * by their virtual time.
 * each intermediate class has one active children list.
 */

static void
actlist_insert(struct hfsc_class *cl)
{
	struct hfsc_class *p;

	/* check the last entry first */
	if ((p = TAILQ_LAST(&cl->cl_parent->cl_actc, acthead)) == NULL
	    || p->cl_vt <= cl->cl_vt) {
		TAILQ_INSERT_TAIL(&cl->cl_parent->cl_actc, cl, cl_actlist);
		return;
	}

	TAILQ_FOREACH(p, &cl->cl_parent->cl_actc, cl_actlist) {
		if (cl->cl_vt < p->cl_vt) {
			TAILQ_INSERT_BEFORE(p, cl, cl_actlist);
			return;
		}
	}
	ASSERT(0); /* should not reach here */
}

static void
actlist_remove(struct hfsc_class *cl)
{
	TAILQ_REMOVE(&cl->cl_parent->cl_actc, cl, cl_actlist);
}

static void
actlist_update(struct hfsc_class *cl)
{
	struct hfsc_class *p, *last;

	/*
	 * the virtual time of a class increases monotonically during its
	 * backlogged period.
	 * if the next entry has a larger virtual time, nothing to do.
	 */
	p = TAILQ_NEXT(cl, cl_actlist);
	if (p == NULL || cl->cl_vt < p->cl_vt)
		return;

	/* check the last entry */
	last = TAILQ_LAST(&cl->cl_parent->cl_actc, acthead);
	ASSERT(last != NULL);
	if (last->cl_vt <= cl->cl_vt) {
		TAILQ_REMOVE(&cl->cl_parent->cl_actc, cl, cl_actlist);
		TAILQ_INSERT_TAIL(&cl->cl_parent->cl_actc, cl, cl_actlist);
		return;
	}

	/*
	 * the new position must be between the next entry
	 * and the last entry
	 */
	while ((p = TAILQ_NEXT(p, cl_actlist)) != NULL) {
		if (cl->cl_vt < p->cl_vt) {
			TAILQ_REMOVE(&cl->cl_parent->cl_actc, cl, cl_actlist);
			TAILQ_INSERT_BEFORE(p, cl, cl_actlist);
			return;
		}
	}
	ASSERT(0); /* should not reach here */
}

static struct hfsc_class *
actlist_firstfit(struct hfsc_class *cl, u_int64_t cur_time)
{
	struct hfsc_class *p;

	TAILQ_FOREACH(p, &cl->cl_actc, cl_actlist) {
		if (p->cl_f <= cur_time)
			return (p);
	}
	return (NULL);
}

/*
 * service curve support functions
 *
 *  external service curve parameters
 *	m: bits/sec
 *	d: msec
 *  internal service curve parameters
 *	sm: (bytes/tsc_interval) << SM_SHIFT
 *	ism: (tsc_count/byte) << ISM_SHIFT
 *	dx: tsc_count
 *
 * SM_SHIFT and ISM_SHIFT are scaled in order to keep effective digits.
 * we should be able to handle 100K-1Gbps linkspeed with 200MHz-1GHz CPU
 * speed.  SM_SHIFT and ISM_SHIFT are selected to have at least 3 effective
 * digits in decimal using the following table.
 *
 *  bits/sec    100Kbps     1Mbps     10Mbps     100Mbps    1Gbps
 *  ----------+-------------------------------------------------------
 *  bytes/nsec  12.5e-6    125e-6     1250e-6    12500e-6   125000e-6
 *  sm(500MHz)  25.0e-6    250e-6     2500e-6    25000e-6   250000e-6
 *  sm(200MHz)  62.5e-6    625e-6     6250e-6    62500e-6   625000e-6
 *
 *  nsec/byte   80000      8000       800        80         8
 *  ism(500MHz) 40000      4000       400        40         4
 *  ism(200MHz) 16000      1600       160        16         1.6
 */
#define	SM_SHIFT	24
#define	ISM_SHIFT	10

#define	SM_MASK		((1LL << SM_SHIFT) - 1)
#define	ISM_MASK	((1LL << ISM_SHIFT) - 1)
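
/*
 * worked example (illustrative; assumes machclk_freq = 500MHz):
 * for m = 10Mbps, m2sm() yields (10000000 << 24) / 8 / 500000000 = 41943,
 * i.e. 2500e-6 bytes/tick scaled by 2^24, and m2ism() yields
 * (500000000 << 10) * 8 / 10000000 = 409600, i.e. 400 ticks/byte scaled
 * by 2^10, matching the sm(500MHz) and ism(500MHz) rows above.
 */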

static __inline u_int64_t
seg_x2y(u_int64_t x, u_int64_t sm)
{
	u_int64_t y;

	/*
	 * compute
	 *	y = x * sm >> SM_SHIFT
	 * but split x into upper and lower bits to avoid overflow
	 */
	y = (x >> SM_SHIFT) * sm + (((x & SM_MASK) * sm) >> SM_SHIFT);
	return (y);
}
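/*
 * note on seg_x2y() above (mirrored by seg_y2x() below): the decomposition
 * is exact, since
 *	(x * sm) >> SM_SHIFT ==
 *	    (x >> SM_SHIFT) * sm + (((x & SM_MASK) * sm) >> SM_SHIFT),
 * and it keeps each intermediate product well within 64 bits for the
 * clock counts and scaled slopes used here.
 */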
1379130365Smlaier
1380130365Smlaierstatic __inline u_int64_t
1381130365Smlaierseg_y2x(u_int64_t y, u_int64_t ism)
1382130365Smlaier{
1383130365Smlaier	u_int64_t x;
1384130365Smlaier
1385130365Smlaier	if (y == 0)
1386130365Smlaier		x = 0;
1387130365Smlaier	else if (ism == HT_INFINITY)
1388130365Smlaier		x = HT_INFINITY;
1389130365Smlaier	else {
1390130365Smlaier		x = (y >> ISM_SHIFT) * ism
1391130365Smlaier		    + (((y & ISM_MASK) * ism) >> ISM_SHIFT);
1392130365Smlaier	}
1393130365Smlaier	return (x);
1394130365Smlaier}
1395130365Smlaier
1396130365Smlaierstatic __inline u_int64_t
1397130365Smlaierm2sm(u_int m)
1398130365Smlaier{
1399130365Smlaier	u_int64_t sm;
1400130365Smlaier
1401130365Smlaier	sm = ((u_int64_t)m << SM_SHIFT) / 8 / machclk_freq;
1402130365Smlaier	return (sm);
1403130365Smlaier}
1404130365Smlaier
1405130365Smlaierstatic __inline u_int64_t
1406130365Smlaierm2ism(u_int m)
1407130365Smlaier{
1408130365Smlaier	u_int64_t ism;
1409130365Smlaier
1410130365Smlaier	if (m == 0)
1411130365Smlaier		ism = HT_INFINITY;
1412130365Smlaier	else
1413130365Smlaier		ism = ((u_int64_t)machclk_freq << ISM_SHIFT) * 8 / m;
1414130365Smlaier	return (ism);
1415130365Smlaier}
1416130365Smlaier
1417130365Smlaierstatic __inline u_int64_t
1418130365Smlaierd2dx(u_int d)
1419130365Smlaier{
1420130365Smlaier	u_int64_t dx;
1421130365Smlaier
1422130365Smlaier	dx = ((u_int64_t)d * machclk_freq) / 1000;
1423130365Smlaier	return (dx);
1424130365Smlaier}
1425130365Smlaier
1426130365Smlaierstatic u_int
1427130365Smlaiersm2m(u_int64_t sm)
1428130365Smlaier{
1429130365Smlaier	u_int64_t m;
1430130365Smlaier
1431130365Smlaier	m = (sm * 8 * machclk_freq) >> SM_SHIFT;
1432130365Smlaier	return ((u_int)m);
1433130365Smlaier}
1434130365Smlaier
1435130365Smlaierstatic u_int
1436130365Smlaierdx2d(u_int64_t dx)
1437130365Smlaier{
1438130365Smlaier	u_int64_t d;
1439130365Smlaier
1440130365Smlaier	d = dx * 1000 / machclk_freq;
1441130365Smlaier	return ((u_int)d);
1442130365Smlaier}
1443130365Smlaier
1444130365Smlaierstatic void
1445130365Smlaiersc2isc(struct service_curve *sc, struct internal_sc *isc)
1446130365Smlaier{
1447130365Smlaier	isc->sm1 = m2sm(sc->m1);
1448130365Smlaier	isc->ism1 = m2ism(sc->m1);
1449130365Smlaier	isc->dx = d2dx(sc->d);
1450130365Smlaier	isc->dy = seg_x2y(isc->dx, isc->sm1);
1451130365Smlaier	isc->sm2 = m2sm(sc->m2);
1452130365Smlaier	isc->ism2 = m2ism(sc->m2);
1453130365Smlaier}
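
/*
 * For instance (again assuming a hypothetical 1GHz machclk), a concave
 * curve with m1 = 2Mbps, d = 50msec, m2 = 1Mbps converts to
 *	sm1 = 4194		(250e-6 bytes/tick << 24)
 *	dx  = 50000000		(50msec worth of ticks)
 *	dy  ~= 12500		(what 2Mbps delivers in 50msec, less rounding)
 *	sm2 = 2097		(125e-6 bytes/tick << 24)
 * Roughly speaking, such a class may be served at up to 2Mbps for the
 * first 50msec of a fresh backlogged period before the long-term 1Mbps
 * slope takes over.
 */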
1454130365Smlaier
1455130365Smlaier/*
1456130365Smlaier * initialize the runtime service curve with the given internal
1457130365Smlaier * service curve starting at (x, y).
1458130365Smlaier */
1459130365Smlaierstatic void
1460130365Smlaierrtsc_init(struct runtime_sc *rtsc, struct internal_sc * isc, u_int64_t x,
1461130365Smlaier    u_int64_t y)
1462130365Smlaier{
1463130365Smlaier	rtsc->x =	x;
1464130365Smlaier	rtsc->y =	y;
1465130365Smlaier	rtsc->sm1 =	isc->sm1;
1466130365Smlaier	rtsc->ism1 =	isc->ism1;
1467130365Smlaier	rtsc->dx =	isc->dx;
1468130365Smlaier	rtsc->dy =	isc->dy;
1469130365Smlaier	rtsc->sm2 =	isc->sm2;
1470130365Smlaier	rtsc->ism2 =	isc->ism2;
1471130365Smlaier}
1472130365Smlaier
1473130365Smlaier/*
1474130365Smlaier * calculate the x-projection (time) of the runtime service curve for
1475130365Smlaier * the given y-projection (amount of service) value
1476130365Smlaier */
1477130365Smlaierstatic u_int64_t
1478130365Smlaierrtsc_y2x(struct runtime_sc *rtsc, u_int64_t y)
1479130365Smlaier{
1480130365Smlaier	u_int64_t	x;
1481130365Smlaier
1482130365Smlaier	if (y < rtsc->y)
1483130365Smlaier		x = rtsc->x;
1484130365Smlaier	else if (y <= rtsc->y + rtsc->dy) {
1485130365Smlaier		/* x belongs to the 1st segment */
1486130365Smlaier		if (rtsc->dy == 0)
1487130365Smlaier			x = rtsc->x + rtsc->dx;
1488130365Smlaier		else
1489130365Smlaier			x = rtsc->x + seg_y2x(y - rtsc->y, rtsc->ism1);
1490130365Smlaier	} else {
1491130365Smlaier		/* x belongs to the 2nd segment */
1492130365Smlaier		x = rtsc->x + rtsc->dx
1493130365Smlaier		    + seg_y2x(y - rtsc->y - rtsc->dy, rtsc->ism2);
1494130365Smlaier	}
1495130365Smlaier	return (x);
1496130365Smlaier}
1497130365Smlaier
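/*
 * calculate the y-projection (amount of service) of the runtime service
 * curve for the given x-projection (time) value
 */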
1498130365Smlaierstatic u_int64_t
1499130365Smlaierrtsc_x2y(struct runtime_sc *rtsc, u_int64_t x)
1500130365Smlaier{
1501130365Smlaier	u_int64_t	y;
1502130365Smlaier
1503130365Smlaier	if (x <= rtsc->x)
1504130365Smlaier		y = rtsc->y;
1505130365Smlaier	else if (x <= rtsc->x + rtsc->dx)
1506130365Smlaier		/* y belongs to the 1st segment */
1507130365Smlaier		y = rtsc->y + seg_x2y(x - rtsc->x, rtsc->sm1);
1508130365Smlaier	else
1509130365Smlaier		/* y belongs to the 2nd segment */
1510130365Smlaier		y = rtsc->y + rtsc->dy
1511130365Smlaier		    + seg_x2y(x - rtsc->x - rtsc->dx, rtsc->sm2);
1512130365Smlaier	return (y);
1513130365Smlaier}
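
#if 0
/*
 * Minimal usage sketch of the helpers above; illustrative only and not
 * compiled into the kernel.  The expected values in the comments assume
 * a hypothetical 1GHz machclk_freq.
 */
static void
hfsc_rtsc_example(void)
{
	struct service_curve sc;
	struct internal_sc isc;
	struct runtime_sc rtsc;
	u_int64_t now, y;

	sc.m1 = 2000000;	/* 2Mbps for the first ... */
	sc.d = 50;		/* ... 50msec, */
	sc.m2 = 1000000;	/* then 1Mbps */

	sc2isc(&sc, &isc);
	now = read_machclk();
	rtsc_init(&rtsc, &isc, now, 0);

	/* service available 50msec into the curve: about 12500 bytes */
	y = rtsc_x2y(&rtsc, now + d2dx(50));

	/* and, inversely, the time by which that much is served: ~(now + 50msec) */
	(void)rtsc_y2x(&rtsc, y);
}
#endif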
1514130365Smlaier
1515130365Smlaier/*
1516130365Smlaier * update the runtime service curve by taking the minimum of the current
1517130365Smlaier * runtime service curve and the service curve starting at (x, y).
1518130365Smlaier */
1519130365Smlaierstatic void
1520130365Smlaierrtsc_min(struct runtime_sc *rtsc, struct internal_sc *isc, u_int64_t x,
1521130365Smlaier    u_int64_t y)
1522130365Smlaier{
1523130365Smlaier	u_int64_t	y1, y2, dx, dy;
1524130365Smlaier
1525130365Smlaier	if (isc->sm1 <= isc->sm2) {
1526130365Smlaier		/* service curve is convex */
1527130365Smlaier		y1 = rtsc_x2y(rtsc, x);
1528130365Smlaier		if (y1 < y)
1529130365Smlaier			/* the current rtsc is smaller */
1530130365Smlaier			return;
1531130365Smlaier		rtsc->x = x;
1532130365Smlaier		rtsc->y = y;
1533130365Smlaier		return;
1534130365Smlaier	}
1535130365Smlaier
1536130365Smlaier	/*
1537130365Smlaier	 * service curve is concave
1538130365Smlaier	 * compute the two y values of the current rtsc
1539130365Smlaier	 *	y1: at x
1540130365Smlaier	 *	y2: at (x + dx)
1541130365Smlaier	 */
1542130365Smlaier	y1 = rtsc_x2y(rtsc, x);
1543130365Smlaier	if (y1 <= y) {
1544130365Smlaier		/* rtsc is below isc, no change to rtsc */
1545130365Smlaier		return;
1546130365Smlaier	}
1547130365Smlaier
1548130365Smlaier	y2 = rtsc_x2y(rtsc, x + isc->dx);
1549130365Smlaier	if (y2 >= y + isc->dy) {
1550130365Smlaier		/* rtsc is above isc, replace rtsc by isc */
1551130365Smlaier		rtsc->x = x;
1552130365Smlaier		rtsc->y = y;
1553130365Smlaier		rtsc->dx = isc->dx;
1554130365Smlaier		rtsc->dy = isc->dy;
1555130365Smlaier		return;
1556130365Smlaier	}
1557130365Smlaier
1558130365Smlaier	/*
1559130365Smlaier	 * the two curves intersect
1560130365Smlaier	 * compute the offsets (dx, dy) using the reverse
1561130365Smlaier	 * function of seg_x2y()
1562130365Smlaier	 *	seg_x2y(dx, sm1) == seg_x2y(dx, sm2) + (y1 - y)
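	 * solving for dx gives
	 *	dx == ((y1 - y) << SM_SHIFT) / (sm1 - sm2)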
1563130365Smlaier	 */
1564130365Smlaier	dx = ((y1 - y) << SM_SHIFT) / (isc->sm1 - isc->sm2);
1565130365Smlaier	/*
1566130365Smlaier	 * check if (x, y1) belongs to the 1st segment of rtsc.
1567130365Smlaier	 * if so, add the offset.
1568130365Smlaier	 */
1569130365Smlaier	if (rtsc->x + rtsc->dx > x)
1570130365Smlaier		dx += rtsc->x + rtsc->dx - x;
1571130365Smlaier	dy = seg_x2y(dx, isc->sm1);
1572130365Smlaier
1573130365Smlaier	rtsc->x = x;
1574130365Smlaier	rtsc->y = y;
1575130365Smlaier	rtsc->dx = dx;
1576130365Smlaier	rtsc->dy = dy;
1577130365Smlaier	return;
1578130365Smlaier}
1579130365Smlaier
1580130365Smlaierstatic void
1581130365Smlaierget_class_stats(struct hfsc_classstats *sp, struct hfsc_class *cl)
1582130365Smlaier{
1583130365Smlaier	sp->class_id = cl->cl_id;
1584130365Smlaier	sp->class_handle = cl->cl_handle;
1585130365Smlaier
1586130365Smlaier	if (cl->cl_rsc != NULL) {
1587130365Smlaier		sp->rsc.m1 = sm2m(cl->cl_rsc->sm1);
1588130365Smlaier		sp->rsc.d = dx2d(cl->cl_rsc->dx);
1589130365Smlaier		sp->rsc.m2 = sm2m(cl->cl_rsc->sm2);
1590130365Smlaier	} else {
1591130365Smlaier		sp->rsc.m1 = 0;
1592130365Smlaier		sp->rsc.d = 0;
1593130365Smlaier		sp->rsc.m2 = 0;
1594130365Smlaier	}
1595130365Smlaier	if (cl->cl_fsc != NULL) {
1596130365Smlaier		sp->fsc.m1 = sm2m(cl->cl_fsc->sm1);
1597130365Smlaier		sp->fsc.d = dx2d(cl->cl_fsc->dx);
1598130365Smlaier		sp->fsc.m2 = sm2m(cl->cl_fsc->sm2);
1599130365Smlaier	} else {
1600130365Smlaier		sp->fsc.m1 = 0;
1601130365Smlaier		sp->fsc.d = 0;
1602130365Smlaier		sp->fsc.m2 = 0;
1603130365Smlaier	}
1604130365Smlaier	if (cl->cl_usc != NULL) {
1605130365Smlaier		sp->usc.m1 = sm2m(cl->cl_usc->sm1);
1606130365Smlaier		sp->usc.d = dx2d(cl->cl_usc->dx);
1607130365Smlaier		sp->usc.m2 = sm2m(cl->cl_usc->sm2);
1608130365Smlaier	} else {
1609130365Smlaier		sp->usc.m1 = 0;
1610130365Smlaier		sp->usc.d = 0;
1611130365Smlaier		sp->usc.m2 = 0;
1612130365Smlaier	}
1613130365Smlaier
1614130365Smlaier	sp->total = cl->cl_total;
1615130365Smlaier	sp->cumul = cl->cl_cumul;
1616130365Smlaier
1617130365Smlaier	sp->d = cl->cl_d;
1618130365Smlaier	sp->e = cl->cl_e;
1619130365Smlaier	sp->vt = cl->cl_vt;
1620130365Smlaier	sp->f = cl->cl_f;
1621130365Smlaier
1622130365Smlaier	sp->initvt = cl->cl_initvt;
1623130365Smlaier	sp->vtperiod = cl->cl_vtperiod;
1624130365Smlaier	sp->parentperiod = cl->cl_parentperiod;
1625130365Smlaier	sp->nactive = cl->cl_nactive;
1626130365Smlaier	sp->vtoff = cl->cl_vtoff;
1627130365Smlaier	sp->cvtmax = cl->cl_cvtmax;
1628130365Smlaier	sp->myf = cl->cl_myf;
1629130365Smlaier	sp->cfmin = cl->cl_cfmin;
1630130365Smlaier	sp->cvtmin = cl->cl_cvtmin;
1631130365Smlaier	sp->myfadj = cl->cl_myfadj;
1632130365Smlaier	sp->vtadj = cl->cl_vtadj;
1633130365Smlaier
1634130365Smlaier	sp->cur_time = read_machclk();
1635130365Smlaier	sp->machclk_freq = machclk_freq;
1636130365Smlaier
1637130365Smlaier	sp->qlength = qlen(cl->cl_q);
1638130365Smlaier	sp->qlimit = qlimit(cl->cl_q);
1639130365Smlaier	sp->xmit_cnt = cl->cl_stats.xmit_cnt;
1640130365Smlaier	sp->drop_cnt = cl->cl_stats.drop_cnt;
1641130365Smlaier	sp->period = cl->cl_stats.period;
1642130365Smlaier
1643130365Smlaier	sp->qtype = qtype(cl->cl_q);
1644130365Smlaier#ifdef ALTQ_RED
1645130365Smlaier	if (q_is_red(cl->cl_q))
1646130365Smlaier		red_getstats(cl->cl_red, &sp->red[0]);
1647130365Smlaier#endif
1648130365Smlaier#ifdef ALTQ_RIO
1649130365Smlaier	if (q_is_rio(cl->cl_q))
1650130365Smlaier		rio_getstats((rio_t *)cl->cl_red, &sp->red[0]);
1651130365Smlaier#endif
1652130365Smlaier}
1653130365Smlaier
1654130365Smlaier/* convert a class handle to the corresponding class pointer */
1655130365Smlaierstatic struct hfsc_class *
1656130365Smlaierclh_to_clp(struct hfsc_if *hif, u_int32_t chandle)
1657130365Smlaier{
1658130365Smlaier	int i;
1659130365Smlaier	struct hfsc_class *cl;
1660130365Smlaier
1661130365Smlaier	if (chandle == 0)
1662130365Smlaier		return (NULL);
1663130365Smlaier	/*
1664130365Smlaier	 * first, try optimistically the slot matching the lower bits of
1665130365Smlaier	 * the handle.  if it fails, do the linear table search.
1666130365Smlaier	 */
1667130365Smlaier	i = chandle % HFSC_MAX_CLASSES;
1668130365Smlaier	if ((cl = hif->hif_class_tbl[i]) != NULL && cl->cl_handle == chandle)
1669130365Smlaier		return (cl);
1670130365Smlaier	for (i = 0; i < HFSC_MAX_CLASSES; i++)
1671130365Smlaier		if ((cl = hif->hif_class_tbl[i]) != NULL &&
1672130365Smlaier		    cl->cl_handle == chandle)
1673130365Smlaier			return (cl);
1674130365Smlaier	return (NULL);
1675130365Smlaier}
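
/*
 * Note that the ALTQ3 ioctl path below (hfsccmd_add_class) hands out a
 * free table slot index as the class handle, so the optimistic probe
 * above normally succeeds on the first try; the linear scan is the
 * fallback for handles assigned by other attach paths.
 */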
1676130365Smlaier
1677130365Smlaier#ifdef ALTQ3_COMPAT
1678130365Smlaierstatic struct hfsc_if *
1679130365Smlaierhfsc_attach(ifq, bandwidth)
1680130365Smlaier	struct ifaltq *ifq;
1681130365Smlaier	u_int bandwidth;
1682130365Smlaier{
1683130365Smlaier	struct hfsc_if *hif;
1684130365Smlaier
1685184214Sdes	hif = malloc(sizeof(struct hfsc_if), M_DEVBUF, M_WAITOK);
1686130365Smlaier	if (hif == NULL)
1687130365Smlaier		return (NULL);
1688130365Smlaier	bzero(hif, sizeof(struct hfsc_if));
1689130365Smlaier
1690130365Smlaier	hif->hif_eligible = ellist_alloc();
1691130365Smlaier	if (hif->hif_eligible == NULL) {
1692184205Sdes		free(hif, M_DEVBUF);
1693130365Smlaier		return NULL;
1694130365Smlaier	}
1695130365Smlaier
1696130365Smlaier	hif->hif_ifq = ifq;
1697130365Smlaier
1698130365Smlaier	/* add this state to the hfsc list */
1699130365Smlaier	hif->hif_next = hif_list;
1700130365Smlaier	hif_list = hif;
1701130365Smlaier
1702130365Smlaier	return (hif);
1703130365Smlaier}
1704130365Smlaier
1705130365Smlaierstatic int
1706130365Smlaierhfsc_detach(hif)
1707130365Smlaier	struct hfsc_if *hif;
1708130365Smlaier{
1709130365Smlaier	(void)hfsc_clear_interface(hif);
1710130365Smlaier	(void)hfsc_class_destroy(hif->hif_rootclass);
1711130365Smlaier
1712130365Smlaier	/* remove this interface from the hif list */
1713130365Smlaier	if (hif_list == hif)
1714130365Smlaier		hif_list = hif->hif_next;
1715130365Smlaier	else {
1716130365Smlaier		struct hfsc_if *h;
1717130365Smlaier
1718130365Smlaier		for (h = hif_list; h != NULL; h = h->hif_next)
1719130365Smlaier			if (h->hif_next == hif) {
1720130365Smlaier				h->hif_next = hif->hif_next;
1721130365Smlaier				break;
1722130365Smlaier			}
1723130365Smlaier		ASSERT(h != NULL);
1724130365Smlaier	}
1725130365Smlaier
1726130365Smlaier	ellist_destroy(hif->hif_eligible);
1727130365Smlaier
1728184205Sdes	free(hif, M_DEVBUF);
1729130365Smlaier
1730130365Smlaier	return (0);
1731130365Smlaier}
1732130365Smlaier
1733130365Smlaierstatic int
1734130365Smlaierhfsc_class_modify(cl, rsc, fsc, usc)
1735130365Smlaier	struct hfsc_class *cl;
1736130365Smlaier	struct service_curve *rsc, *fsc, *usc;
1737130365Smlaier{
1738130365Smlaier	struct internal_sc *rsc_tmp, *fsc_tmp, *usc_tmp;
1739130365Smlaier	u_int64_t cur_time;
1740130365Smlaier	int s;
1741130365Smlaier
1742130365Smlaier	rsc_tmp = fsc_tmp = usc_tmp = NULL;
1743130365Smlaier	if (rsc != NULL && (rsc->m1 != 0 || rsc->m2 != 0) &&
1744130365Smlaier	    cl->cl_rsc == NULL) {
1745184214Sdes		rsc_tmp = malloc(sizeof(struct internal_sc),
1746184214Sdes		    M_DEVBUF, M_WAITOK);
1747130365Smlaier		if (rsc_tmp == NULL)
1748130365Smlaier			return (ENOMEM);
1749130365Smlaier	}
1750130365Smlaier	if (fsc != NULL && (fsc->m1 != 0 || fsc->m2 != 0) &&
1751130365Smlaier	    cl->cl_fsc == NULL) {
1752184214Sdes		fsc_tmp = malloc(sizeof(struct internal_sc),
1753184214Sdes		    M_DEVBUF, M_WAITOK);
1754198952Sbrueffer		if (fsc_tmp == NULL) {
1755198952Sbrueffer			free(rsc_tmp, M_DEVBUF);
1756130365Smlaier			return (ENOMEM);
1757198952Sbrueffer		}
1758130365Smlaier	}
1759130365Smlaier	if (usc != NULL && (usc->m1 != 0 || usc->m2 != 0) &&
1760130365Smlaier	    cl->cl_usc == NULL) {
1761184214Sdes		usc_tmp = malloc(sizeof(struct internal_sc),
1762184214Sdes		    M_DEVBUF, M_WAITOK);
1763198952Sbrueffer		if (usc_tmp == NULL) {
1764198952Sbrueffer			free(rsc_tmp, M_DEVBUF);
1765198952Sbrueffer			free(fsc_tmp, M_DEVBUF);
1766130365Smlaier			return (ENOMEM);
1767198952Sbrueffer		}
1768130365Smlaier	}
1769130365Smlaier
1770130365Smlaier	cur_time = read_machclk();
1771130365Smlaier#ifdef __NetBSD__
1772130365Smlaier	s = splnet();
1773130365Smlaier#else
1774130365Smlaier	s = splimp();
1775130365Smlaier#endif
1776130368Smlaier	IFQ_LOCK(cl->cl_hif->hif_ifq);
1777130365Smlaier
1778130365Smlaier	if (rsc != NULL) {
1779130365Smlaier		if (rsc->m1 == 0 && rsc->m2 == 0) {
1780130365Smlaier			if (cl->cl_rsc != NULL) {
1781130365Smlaier				if (!qempty(cl->cl_q))
1782130365Smlaier					hfsc_purgeq(cl);
1783184205Sdes				free(cl->cl_rsc, M_DEVBUF);
1784130365Smlaier				cl->cl_rsc = NULL;
1785130365Smlaier			}
1786130365Smlaier		} else {
1787130365Smlaier			if (cl->cl_rsc == NULL)
1788130365Smlaier				cl->cl_rsc = rsc_tmp;
1789130365Smlaier			sc2isc(rsc, cl->cl_rsc);
1790130365Smlaier			rtsc_init(&cl->cl_deadline, cl->cl_rsc, cur_time,
1791130365Smlaier			    cl->cl_cumul);
1792130365Smlaier			cl->cl_eligible = cl->cl_deadline;
1793130365Smlaier			if (cl->cl_rsc->sm1 <= cl->cl_rsc->sm2) {
1794130365Smlaier				cl->cl_eligible.dx = 0;
1795130365Smlaier				cl->cl_eligible.dy = 0;
1796130365Smlaier			}
1797130365Smlaier		}
1798130365Smlaier	}
1799130365Smlaier
1800130365Smlaier	if (fsc != NULL) {
1801130365Smlaier		if (fsc->m1 == 0 && fsc->m2 == 0) {
1802130365Smlaier			if (cl->cl_fsc != NULL) {
1803130365Smlaier				if (!qempty(cl->cl_q))
1804130365Smlaier					hfsc_purgeq(cl);
1805184205Sdes				free(cl->cl_fsc, M_DEVBUF);
1806130365Smlaier				cl->cl_fsc = NULL;
1807130365Smlaier			}
1808130365Smlaier		} else {
1809130365Smlaier			if (cl->cl_fsc == NULL)
1810130365Smlaier				cl->cl_fsc = fsc_tmp;
1811130365Smlaier			sc2isc(fsc, cl->cl_fsc);
1812130365Smlaier			rtsc_init(&cl->cl_virtual, cl->cl_fsc, cl->cl_vt,
1813130365Smlaier			    cl->cl_total);
1814130365Smlaier		}
1815130365Smlaier	}
1816130365Smlaier
1817130365Smlaier	if (usc != NULL) {
1818130365Smlaier		if (usc->m1 == 0 && usc->m2 == 0) {
1819130365Smlaier			if (cl->cl_usc != NULL) {
1820184205Sdes				free(cl->cl_usc, M_DEVBUF);
1821130365Smlaier				cl->cl_usc = NULL;
1822130365Smlaier				cl->cl_myf = 0;
1823130365Smlaier			}
1824130365Smlaier		} else {
1825130365Smlaier			if (cl->cl_usc == NULL)
1826130365Smlaier				cl->cl_usc = usc_tmp;
1827130365Smlaier			sc2isc(usc, cl->cl_usc);
1828130365Smlaier			rtsc_init(&cl->cl_ulimit, cl->cl_usc, cur_time,
1829130365Smlaier			    cl->cl_total);
1830130365Smlaier		}
1831130365Smlaier	}
1832130365Smlaier
1833130365Smlaier	if (!qempty(cl->cl_q)) {
1834130365Smlaier		if (cl->cl_rsc != NULL)
1835130365Smlaier			update_ed(cl, m_pktlen(qhead(cl->cl_q)));
1836130365Smlaier		if (cl->cl_fsc != NULL)
1837130365Smlaier			update_vf(cl, 0, cur_time);
1838130365Smlaier		/* is this enough? */
1839130365Smlaier	}
1840130365Smlaier
1841130368Smlaier	IFQ_UNLOCK(cl->cl_hif->hif_ifq);
1842130365Smlaier	splx(s);
1843130365Smlaier
1844130365Smlaier	return (0);
1845130365Smlaier}
1846130365Smlaier
1847130365Smlaier/*
1848130365Smlaier * hfsc device interface
1849130365Smlaier */
1850130365Smlaierint
1851130365Smlaierhfscopen(dev, flag, fmt, p)
1852130365Smlaier	dev_t dev;
1853130365Smlaier	int flag, fmt;
1854130365Smlaier#if (__FreeBSD_version > 500000)
1855130365Smlaier	struct thread *p;
1856130365Smlaier#else
1857130365Smlaier	struct proc *p;
1858130365Smlaier#endif
1859130365Smlaier{
1860130365Smlaier	if (machclk_freq == 0)
1861130365Smlaier		init_machclk();
1862130365Smlaier
1863130365Smlaier	if (machclk_freq == 0) {
1864130365Smlaier		printf("hfsc: no cpu clock available!\n");
1865130365Smlaier		return (ENXIO);
1866130365Smlaier	}
1867130365Smlaier
1868130365Smlaier	/* everything will be done when the queueing scheme is attached. */
1869130365Smlaier	return 0;
1870130365Smlaier}
1871130365Smlaier
1872130365Smlaierint
1873130365Smlaierhfscclose(dev, flag, fmt, p)
1874130365Smlaier	dev_t dev;
1875130365Smlaier	int flag, fmt;
1876130365Smlaier#if (__FreeBSD_version > 500000)
1877130365Smlaier	struct thread *p;
1878130365Smlaier#else
1879130365Smlaier	struct proc *p;
1880130365Smlaier#endif
1881130365Smlaier{
1882130365Smlaier	struct hfsc_if *hif;
1883130365Smlaier	int err, error = 0;
1884130365Smlaier
1885130365Smlaier	while ((hif = hif_list) != NULL) {
1886130365Smlaier		/* destroy all */
1887130365Smlaier		if (ALTQ_IS_ENABLED(hif->hif_ifq))
1888130365Smlaier			altq_disable(hif->hif_ifq);
1889130365Smlaier
1890130365Smlaier		err = altq_detach(hif->hif_ifq);
1891130365Smlaier		if (err == 0)
1892130365Smlaier			err = hfsc_detach(hif);
1893130365Smlaier		if (err != 0 && error == 0)
1894130365Smlaier			error = err;
1895130365Smlaier	}
1896130365Smlaier
1897130365Smlaier	return error;
1898130365Smlaier}
1899130365Smlaier
1900130365Smlaierint
1901130365Smlaierhfscioctl(dev, cmd, addr, flag, p)
1902130365Smlaier	dev_t dev;
1903130365Smlaier	ioctlcmd_t cmd;
1904130365Smlaier	caddr_t addr;
1905130365Smlaier	int flag;
1906130365Smlaier#if (__FreeBSD_version > 500000)
1907130365Smlaier	struct thread *p;
1908130365Smlaier#else
1909130365Smlaier	struct proc *p;
1910130365Smlaier#endif
1911130365Smlaier{
1912130365Smlaier	struct hfsc_if *hif;
1913130365Smlaier	struct hfsc_interface *ifacep;
1914130365Smlaier	int	error = 0;
1915130365Smlaier
1916130365Smlaier	/* check super-user privilege */
1917130365Smlaier	switch (cmd) {
1918130365Smlaier	case HFSC_GETSTATS:
1919130365Smlaier		break;
1920130365Smlaier	default:
1921164033Srwatson#if (__FreeBSD_version > 700000)
1922164033Srwatson		if ((error = priv_check(p, PRIV_ALTQ_MANAGE)) != 0)
1923164033Srwatson			return (error);
1924164033Srwatson#elif (__FreeBSD_version > 400000)
1925130365Smlaier		if ((error = suser(p)) != 0)
1926130365Smlaier			return (error);
1927130365Smlaier#else
1928130365Smlaier		if ((error = suser(p->p_ucred, &p->p_acflag)) != 0)
1929130365Smlaier			return (error);
1930130365Smlaier#endif
1931130365Smlaier		break;
1932130365Smlaier	}
1933130365Smlaier
1934130365Smlaier	switch (cmd) {
1935130365Smlaier
1936130365Smlaier	case HFSC_IF_ATTACH:
1937130365Smlaier		error = hfsccmd_if_attach((struct hfsc_attach *)addr);
1938130365Smlaier		break;
1939130365Smlaier
1940130365Smlaier	case HFSC_IF_DETACH:
1941130365Smlaier		error = hfsccmd_if_detach((struct hfsc_interface *)addr);
1942130365Smlaier		break;
1943130365Smlaier
1944130365Smlaier	case HFSC_ENABLE:
1945130365Smlaier	case HFSC_DISABLE:
1946130365Smlaier	case HFSC_CLEAR_HIERARCHY:
1947130365Smlaier		ifacep = (struct hfsc_interface *)addr;
1948130365Smlaier		if ((hif = altq_lookup(ifacep->hfsc_ifname,
1949130365Smlaier				       ALTQT_HFSC)) == NULL) {
1950130365Smlaier			error = EBADF;
1951130365Smlaier			break;
1952130365Smlaier		}
1953130365Smlaier
1954130365Smlaier		switch (cmd) {
1955130365Smlaier
1956130365Smlaier		case HFSC_ENABLE:
1957130365Smlaier			if (hif->hif_defaultclass == NULL) {
1958130365Smlaier#ifdef ALTQ_DEBUG
1959130365Smlaier				printf("hfsc: no default class\n");
1960130365Smlaier#endif
1961130365Smlaier				error = EINVAL;
1962130365Smlaier				break;
1963130365Smlaier			}
1964130365Smlaier			error = altq_enable(hif->hif_ifq);
1965130365Smlaier			break;
1966130365Smlaier
1967130365Smlaier		case HFSC_DISABLE:
1968130365Smlaier			error = altq_disable(hif->hif_ifq);
1969130365Smlaier			break;
1970130365Smlaier
1971130365Smlaier		case HFSC_CLEAR_HIERARCHY:
1972130365Smlaier			hfsc_clear_interface(hif);
1973130365Smlaier			break;
1974130365Smlaier		}
1975130365Smlaier		break;
1976130365Smlaier
1977130365Smlaier	case HFSC_ADD_CLASS:
1978130365Smlaier		error = hfsccmd_add_class((struct hfsc_add_class *)addr);
1979130365Smlaier		break;
1980130365Smlaier
1981130365Smlaier	case HFSC_DEL_CLASS:
1982130365Smlaier		error = hfsccmd_delete_class((struct hfsc_delete_class *)addr);
1983130365Smlaier		break;
1984130365Smlaier
1985130365Smlaier	case HFSC_MOD_CLASS:
1986130365Smlaier		error = hfsccmd_modify_class((struct hfsc_modify_class *)addr);
1987130365Smlaier		break;
1988130365Smlaier
1989130365Smlaier	case HFSC_ADD_FILTER:
1990130365Smlaier		error = hfsccmd_add_filter((struct hfsc_add_filter *)addr);
1991130365Smlaier		break;
1992130365Smlaier
1993130365Smlaier	case HFSC_DEL_FILTER:
1994130365Smlaier		error = hfsccmd_delete_filter((struct hfsc_delete_filter *)addr);
1995130365Smlaier		break;
1996130365Smlaier
1997130365Smlaier	case HFSC_GETSTATS:
1998130365Smlaier		error = hfsccmd_class_stats((struct hfsc_class_stats *)addr);
1999130365Smlaier		break;
2000130365Smlaier
2001130365Smlaier	default:
2002130365Smlaier		error = EINVAL;
2003130365Smlaier		break;
2004130365Smlaier	}
2005130365Smlaier	return error;
2006130365Smlaier}
2007130365Smlaier
2008130365Smlaierstatic int
2009130365Smlaierhfsccmd_if_attach(ap)
2010130365Smlaier	struct hfsc_attach *ap;
2011130365Smlaier{
2012130365Smlaier	struct hfsc_if *hif;
2013130365Smlaier	struct ifnet *ifp;
2014130365Smlaier	int error;
2015130365Smlaier
2016130365Smlaier	if ((ifp = ifunit(ap->iface.hfsc_ifname)) == NULL)
2017130365Smlaier		return (ENXIO);
2018130365Smlaier
2019130365Smlaier	if ((hif = hfsc_attach(&ifp->if_snd, ap->bandwidth)) == NULL)
2020130365Smlaier		return (ENOMEM);
2021130365Smlaier
2022130365Smlaier	/*
2023130365Smlaier	 * set HFSC to this ifnet structure.
2024130365Smlaier	 */
2025130365Smlaier	if ((error = altq_attach(&ifp->if_snd, ALTQT_HFSC, hif,
2026130365Smlaier				 hfsc_enqueue, hfsc_dequeue, hfsc_request,
2027130365Smlaier				 &hif->hif_classifier, acc_classify)) != 0)
2028130365Smlaier		(void)hfsc_detach(hif);
2029130365Smlaier
2030130365Smlaier	return (error);
2031130365Smlaier}
2032130365Smlaier
2033130365Smlaierstatic int
2034130365Smlaierhfsccmd_if_detach(ap)
2035130365Smlaier	struct hfsc_interface *ap;
2036130365Smlaier{
2037130365Smlaier	struct hfsc_if *hif;
2038130365Smlaier	int error;
2039130365Smlaier
2040130365Smlaier	if ((hif = altq_lookup(ap->hfsc_ifname, ALTQT_HFSC)) == NULL)
2041130365Smlaier		return (EBADF);
2042130365Smlaier
2043130365Smlaier	if (ALTQ_IS_ENABLED(hif->hif_ifq))
2044130365Smlaier		altq_disable(hif->hif_ifq);
2045130365Smlaier
2046130365Smlaier	if ((error = altq_detach(hif->hif_ifq)))
2047130365Smlaier		return (error);
2048130365Smlaier
2049130365Smlaier	return hfsc_detach(hif);
2050130365Smlaier}
2051130365Smlaier
2052130365Smlaierstatic int
2053130365Smlaierhfsccmd_add_class(ap)
2054130365Smlaier	struct hfsc_add_class *ap;
2055130365Smlaier{
2056130365Smlaier	struct hfsc_if *hif;
2057130365Smlaier	struct hfsc_class *cl, *parent;
2058130365Smlaier	int	i;
2059130365Smlaier
2060130365Smlaier	if ((hif = altq_lookup(ap->iface.hfsc_ifname, ALTQT_HFSC)) == NULL)
2061130365Smlaier		return (EBADF);
2062130365Smlaier
2063130365Smlaier	if (ap->parent_handle == HFSC_NULLCLASS_HANDLE &&
2064130365Smlaier	    hif->hif_rootclass == NULL)
2065130365Smlaier		parent = NULL;
2066130365Smlaier	else if ((parent = clh_to_clp(hif, ap->parent_handle)) == NULL)
2067130365Smlaier		return (EINVAL);
2068130365Smlaier
2069130365Smlaier	/* assign a class handle (use a free slot number for now) */
2070130365Smlaier	for (i = 1; i < HFSC_MAX_CLASSES; i++)
2071130365Smlaier		if (hif->hif_class_tbl[i] == NULL)
2072130365Smlaier			break;
2073130365Smlaier	if (i == HFSC_MAX_CLASSES)
2074130365Smlaier		return (EBUSY);
2075130365Smlaier
2076130365Smlaier	if ((cl = hfsc_class_create(hif, &ap->service_curve, NULL, NULL,
2077130365Smlaier	    parent, ap->qlimit, ap->flags, i)) == NULL)
2078130365Smlaier		return (ENOMEM);
2079130365Smlaier
2080130365Smlaier	/* return a class handle to the user */
2081130365Smlaier	ap->class_handle = i;
2082130365Smlaier
2083130365Smlaier	return (0);
2084130365Smlaier}
2085130365Smlaier
2086130365Smlaierstatic int
2087130365Smlaierhfsccmd_delete_class(ap)
2088130365Smlaier	struct hfsc_delete_class *ap;
2089130365Smlaier{
2090130365Smlaier	struct hfsc_if *hif;
2091130365Smlaier	struct hfsc_class *cl;
2092130365Smlaier
2093130365Smlaier	if ((hif = altq_lookup(ap->iface.hfsc_ifname, ALTQT_HFSC)) == NULL)
2094130365Smlaier		return (EBADF);
2095130365Smlaier
2096130365Smlaier	if ((cl = clh_to_clp(hif, ap->class_handle)) == NULL)
2097130365Smlaier		return (EINVAL);
2098130365Smlaier
2099130365Smlaier	return hfsc_class_destroy(cl);
2100130365Smlaier}
2101130365Smlaier
2102130365Smlaierstatic int
2103130365Smlaierhfsccmd_modify_class(ap)
2104130365Smlaier	struct hfsc_modify_class *ap;
2105130365Smlaier{
2106130365Smlaier	struct hfsc_if *hif;
2107130365Smlaier	struct hfsc_class *cl;
2108130365Smlaier	struct service_curve *rsc = NULL;
2109130365Smlaier	struct service_curve *fsc = NULL;
2110130365Smlaier	struct service_curve *usc = NULL;
2111130365Smlaier
2112130365Smlaier	if ((hif = altq_lookup(ap->iface.hfsc_ifname, ALTQT_HFSC)) == NULL)
2113130365Smlaier		return (EBADF);
2114130365Smlaier
2115130365Smlaier	if ((cl = clh_to_clp(hif, ap->class_handle)) == NULL)
2116130365Smlaier		return (EINVAL);
2117130365Smlaier
2118130365Smlaier	if (ap->sctype & HFSC_REALTIMESC)
2119130365Smlaier		rsc = &ap->service_curve;
2120130365Smlaier	if (ap->sctype & HFSC_LINKSHARINGSC)
2121130365Smlaier		fsc = &ap->service_curve;
2122130365Smlaier	if (ap->sctype & HFSC_UPPERLIMITSC)
2123130365Smlaier		usc = &ap->service_curve;
2124130365Smlaier
2125130365Smlaier	return hfsc_class_modify(cl, rsc, fsc, usc);
2126130365Smlaier}
2127130365Smlaier
2128130365Smlaierstatic int
2129130365Smlaierhfsccmd_add_filter(ap)
2130130365Smlaier	struct hfsc_add_filter *ap;
2131130365Smlaier{
2132130365Smlaier	struct hfsc_if *hif;
2133130365Smlaier	struct hfsc_class *cl;
2134130365Smlaier
2135130365Smlaier	if ((hif = altq_lookup(ap->iface.hfsc_ifname, ALTQT_HFSC)) == NULL)
2136130365Smlaier		return (EBADF);
2137130365Smlaier
2138130365Smlaier	if ((cl = clh_to_clp(hif, ap->class_handle)) == NULL)
2139130365Smlaier		return (EINVAL);
2140130365Smlaier
2141130365Smlaier	if (is_a_parent_class(cl)) {
2142130365Smlaier#ifdef ALTQ_DEBUG
2143130365Smlaier		printf("hfsccmd_add_filter: not a leaf class!\n");
2144130365Smlaier#endif
2145130365Smlaier		return (EINVAL);
2146130365Smlaier	}
2147130365Smlaier
2148130365Smlaier	return acc_add_filter(&hif->hif_classifier, &ap->filter,
2149130365Smlaier			      cl, &ap->filter_handle);
2150130365Smlaier}
2151130365Smlaier
2152130365Smlaierstatic int
2153130365Smlaierhfsccmd_delete_filter(ap)
2154130365Smlaier	struct hfsc_delete_filter *ap;
2155130365Smlaier{
2156130365Smlaier	struct hfsc_if *hif;
2157130365Smlaier
2158130365Smlaier	if ((hif = altq_lookup(ap->iface.hfsc_ifname, ALTQT_HFSC)) == NULL)
2159130365Smlaier		return (EBADF);
2160130365Smlaier
2161130365Smlaier	return acc_delete_filter(&hif->hif_classifier,
2162130365Smlaier				 ap->filter_handle);
2163130365Smlaier}
2164130365Smlaier
2165130365Smlaierstatic int
2166130365Smlaierhfsccmd_class_stats(ap)
2167130365Smlaier	struct hfsc_class_stats *ap;
2168130365Smlaier{
2169130365Smlaier	struct hfsc_if *hif;
2170130365Smlaier	struct hfsc_class *cl;
2171130365Smlaier	struct hfsc_classstats stats, *usp;
2172130365Smlaier	int	n, nclasses, error;
2173130365Smlaier
2174130365Smlaier	if ((hif = altq_lookup(ap->iface.hfsc_ifname, ALTQT_HFSC)) == NULL)
2175130365Smlaier		return (EBADF);
2176130365Smlaier
2177130365Smlaier	ap->cur_time = read_machclk();
2178130365Smlaier	ap->machclk_freq = machclk_freq;
2179130365Smlaier	ap->hif_classes = hif->hif_classes;
2180130365Smlaier	ap->hif_packets = hif->hif_packets;
2181130365Smlaier
2182130365Smlaier	/* skip the first N classes in the tree */
2183130365Smlaier	nclasses = ap->nskip;
2184130365Smlaier	for (cl = hif->hif_rootclass, n = 0; cl != NULL && n < nclasses;
2185130365Smlaier	     cl = hfsc_nextclass(cl), n++)
2186130365Smlaier		;
2187130365Smlaier	if (n != nclasses)
2188130365Smlaier		return (EINVAL);
2189130365Smlaier
2190130365Smlaier	/* then, read the next N classes in the tree */
2191130365Smlaier	nclasses = ap->nclasses;
2192130365Smlaier	usp = ap->stats;
2193130365Smlaier	for (n = 0; cl != NULL && n < nclasses; cl = hfsc_nextclass(cl), n++) {
2194130365Smlaier
2195130365Smlaier		get_class_stats(&stats, cl);
2196130365Smlaier
2197130365Smlaier		if ((error = copyout((caddr_t)&stats, (caddr_t)usp++,
2198130365Smlaier				     sizeof(stats))) != 0)
2199130365Smlaier			return (error);
2200130365Smlaier	}
2201130365Smlaier
2202130365Smlaier	ap->nclasses = n;
2203130365Smlaier
2204130365Smlaier	return (0);
2205130365Smlaier}
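
/*
 * A consumer of HFSC_GETSTATS can page through a large hierarchy by
 * issuing the ioctl repeatedly: start with nskip = 0, supply a buffer of
 * nclasses entries, and advance nskip by the nclasses value written back
 * until it comes back smaller than the buffer (sketch only; the exact
 * loop is up to the caller).
 */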
2206130365Smlaier
2207130365Smlaier#ifdef KLD_MODULE
2208130365Smlaier
2209130365Smlaierstatic struct altqsw hfsc_sw =
2210130365Smlaier	{"hfsc", hfscopen, hfscclose, hfscioctl};
2211130365Smlaier
2212130365SmlaierALTQ_MODULE(altq_hfsc, ALTQT_HFSC, &hfsc_sw);
2213130365SmlaierMODULE_DEPEND(altq_hfsc, altq_red, 1, 1, 1);
2214130365SmlaierMODULE_DEPEND(altq_hfsc, altq_rio, 1, 1, 1);
2215130365Smlaier
2216130365Smlaier#endif /* KLD_MODULE */
2217130365Smlaier#endif /* ALTQ3_COMPAT */
2218130365Smlaier
2219130365Smlaier#endif /* ALTQ_HFSC */