altq_hfsc.c revision 1.29
/*	$NetBSD: altq_hfsc.c,v 1.29 2021/08/30 08:40:31 riastradh Exp $	*/
/*	$KAME: altq_hfsc.c,v 1.26 2005/04/13 03:44:24 suz Exp $	*/

/*
 * Copyright (c) 1997-1999 Carnegie Mellon University. All Rights Reserved.
 *
 * Permission to use, copy, modify, and distribute this software and
 * its documentation is hereby granted (including for commercial or
 * for-profit use), provided that both the copyright notice and this
 * permission notice appear in all copies of the software, derivative
 * works, or modified versions, and any portions thereof.
 *
 * THIS SOFTWARE IS EXPERIMENTAL AND IS KNOWN TO HAVE BUGS, SOME OF
 * WHICH MAY HAVE SERIOUS CONSEQUENCES.  CARNEGIE MELLON PROVIDES THIS
 * SOFTWARE IN ITS ``AS IS'' CONDITION, AND ANY EXPRESS OR IMPLIED
 * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
 * DISCLAIMED.  IN NO EVENT SHALL CARNEGIE MELLON UNIVERSITY BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT
 * OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
 * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
 * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
 * USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH
 * DAMAGE.
 *
 * Carnegie Mellon encourages (but does not require) users of this
 * software to return any improvements or extensions that they make,
 * and to grant Carnegie Mellon the rights to redistribute these
 * changes without encumbrance.
 */
/*
 * H-FSC is described in Proceedings of SIGCOMM'97,
 * "A Hierarchical Fair Service Curve Algorithm for Link-Sharing,
 * Real-Time and Priority Service"
 * by Ion Stoica, Hui Zhang, and T. S. Eugene Ng.
 *
 * Oleg Cherevko <olwi@aq.ml.com.ua> added the upperlimit for link-sharing.
 * when a class has an upperlimit, the fit-time is computed from the
 * upperlimit service curve.  the link-sharing scheduler does not schedule
 * a class whose fit-time exceeds the current time.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: altq_hfsc.c,v 1.29 2021/08/30 08:40:31 riastradh Exp $");

#ifdef _KERNEL_OPT
#include "opt_altq.h"
#include "opt_inet.h"
#include "pf.h"
#endif

#ifdef ALTQ_HFSC  /* hfsc is enabled by ALTQ_HFSC option in opt_altq.h */

#include <sys/param.h>
#include <sys/malloc.h>
#include <sys/mbuf.h>
#include <sys/socket.h>
#include <sys/systm.h>
#include <sys/errno.h>
#include <sys/queue.h>
#if 1 /* ALTQ3_COMPAT */
#include <sys/sockio.h>
#include <sys/proc.h>
#include <sys/kernel.h>
#endif /* ALTQ3_COMPAT */
#include <sys/kauth.h>

#include <net/if.h>
#include <netinet/in.h>

#if NPF > 0
#include <net/pfvar.h>
#endif
#include <altq/altq.h>
#include <altq/altq_hfsc.h>
#ifdef ALTQ3_COMPAT
#include <altq/altq_conf.h>
#endif

/*
 * function prototypes
 */
static int			 hfsc_clear_interface(struct hfsc_if *);
static int			 hfsc_request(struct ifaltq *, int, void *);
static void			 hfsc_purge(struct hfsc_if *);
static struct hfsc_class	*hfsc_class_create(struct hfsc_if *,
    struct service_curve *, struct service_curve *, struct service_curve *,
    struct hfsc_class *, int, int, int);
static int			 hfsc_class_destroy(struct hfsc_class *);
static struct hfsc_class	*hfsc_nextclass(struct hfsc_class *);
static int			 hfsc_enqueue(struct ifaltq *, struct mbuf *);
static struct mbuf		*hfsc_dequeue(struct ifaltq *, int);

static int		 hfsc_addq(struct hfsc_class *, struct mbuf *);
static struct mbuf	*hfsc_getq(struct hfsc_class *);
static struct mbuf	*hfsc_pollq(struct hfsc_class *);
static void		 hfsc_purgeq(struct hfsc_class *);

static void		 update_cfmin(struct hfsc_class *);
static void		 set_active(struct hfsc_class *, int);
static void		 set_passive(struct hfsc_class *);

static void		 init_ed(struct hfsc_class *, int);
static void		 update_ed(struct hfsc_class *, int);
static void		 update_d(struct hfsc_class *, int);
static void		 init_vf(struct hfsc_class *, int);
static void		 update_vf(struct hfsc_class *, int, u_int64_t);
static ellist_t		*ellist_alloc(void);
static void		 ellist_destroy(ellist_t *);
static void		 ellist_insert(struct hfsc_class *);
static void		 ellist_remove(struct hfsc_class *);
static void		 ellist_update(struct hfsc_class *);
struct hfsc_class	*ellist_get_mindl(ellist_t *, u_int64_t);
static actlist_t	*actlist_alloc(void);
static void		 actlist_destroy(actlist_t *);
static void		 actlist_insert(struct hfsc_class *);
static void		 actlist_remove(struct hfsc_class *);
static void		 actlist_update(struct hfsc_class *);

static struct hfsc_class	*actlist_firstfit(struct hfsc_class *,
				    u_int64_t);

static inline u_int64_t	seg_x2y(u_int64_t, u_int64_t);
static inline u_int64_t	seg_y2x(u_int64_t, u_int64_t);
static inline u_int64_t	m2sm(u_int);
static inline u_int64_t	m2ism(u_int);
static inline u_int64_t	d2dx(u_int);
static u_int			sm2m(u_int64_t);
static u_int			dx2d(u_int64_t);

static void		sc2isc(struct service_curve *, struct internal_sc *);
static void		rtsc_init(struct runtime_sc *, struct internal_sc *,
			    u_int64_t, u_int64_t);
static u_int64_t	rtsc_y2x(struct runtime_sc *, u_int64_t);
static u_int64_t	rtsc_x2y(struct runtime_sc *, u_int64_t);
static void		rtsc_min(struct runtime_sc *, struct internal_sc *,
			    u_int64_t, u_int64_t);

static void			 get_class_stats(struct hfsc_classstats *,
				    struct hfsc_class *);
static struct hfsc_class	*clh_to_clp(struct hfsc_if *, u_int32_t);


#ifdef ALTQ3_COMPAT
static struct hfsc_if *hfsc_attach(struct ifaltq *, u_int);
static void hfsc_detach(struct hfsc_if *);
static int hfsc_class_modify(struct hfsc_class *, struct service_curve *,
    struct service_curve *, struct service_curve *);

static int hfsccmd_if_attach(struct hfsc_attach *);
static int hfsccmd_if_detach(struct hfsc_interface *);
static int hfsccmd_add_class(struct hfsc_add_class *);
static int hfsccmd_delete_class(struct hfsc_delete_class *);
static int hfsccmd_modify_class(struct hfsc_modify_class *);
static int hfsccmd_add_filter(struct hfsc_add_filter *);
static int hfsccmd_delete_filter(struct hfsc_delete_filter *);
static int hfsccmd_class_stats(struct hfsc_class_stats *);

altqdev_decl(hfsc);
#endif /* ALTQ3_COMPAT */

/*
 * macros
 */
#define	is_a_parent_class(cl)	((cl)->cl_children != NULL)

#define	HT_INFINITY	0xffffffffffffffffLL	/* infinite time value */

#ifdef ALTQ3_COMPAT
/* hif_list keeps all hfsc_if's allocated. */
static struct hfsc_if *hif_list = NULL;
#endif /* ALTQ3_COMPAT */

#if NPF > 0
int
hfsc_pfattach(struct pf_altq *a)
{
	struct ifnet *ifp;
	int s, error;

	if ((ifp = ifunit(a->ifname)) == NULL || a->altq_disc == NULL)
		return (EINVAL);
	s = splnet();
	error = altq_attach(&ifp->if_snd, ALTQT_HFSC, a->altq_disc,
	    hfsc_enqueue, hfsc_dequeue, hfsc_request, NULL, NULL);
	splx(s);
	return (error);
}

int
hfsc_add_altq(struct pf_altq *a)
{
	struct hfsc_if *hif;
	struct ifnet *ifp;

	if ((ifp = ifunit(a->ifname)) == NULL)
		return (EINVAL);
	if (!ALTQ_IS_READY(&ifp->if_snd))
		return (ENODEV);

	hif = malloc(sizeof(struct hfsc_if), M_DEVBUF, M_WAITOK|M_ZERO);
	if (hif == NULL)
		return (ENOMEM);

	hif->hif_eligible = ellist_alloc();
	if (hif->hif_eligible == NULL) {
		free(hif, M_DEVBUF);
		return (ENOMEM);
	}

	hif->hif_ifq = &ifp->if_snd;

	/* keep the state in pf_altq */
	a->altq_disc = hif;

	return (0);
}

int
hfsc_remove_altq(struct pf_altq *a)
{
	struct hfsc_if *hif;

	if ((hif = a->altq_disc) == NULL)
		return (EINVAL);
	a->altq_disc = NULL;

	(void)hfsc_clear_interface(hif);
	(void)hfsc_class_destroy(hif->hif_rootclass);

	ellist_destroy(hif->hif_eligible);

	free(hif, M_DEVBUF);

	return (0);
}

int
hfsc_add_queue(struct pf_altq *a)
{
	struct hfsc_if *hif;
	struct hfsc_class *cl, *parent;
	struct hfsc_opts *opts;
	struct service_curve rtsc, lssc, ulsc;

	if ((hif = a->altq_disc) == NULL)
		return (EINVAL);

	opts = &a->pq_u.hfsc_opts;

	if (a->parent_qid == HFSC_NULLCLASS_HANDLE &&
	    hif->hif_rootclass == NULL)
		parent = NULL;
	else if ((parent = clh_to_clp(hif, a->parent_qid)) == NULL)
		return (EINVAL);

	if (a->qid == 0)
		return (EINVAL);

	if (clh_to_clp(hif, a->qid) != NULL)
		return (EBUSY);

	rtsc.m1 = opts->rtsc_m1;
	rtsc.d  = opts->rtsc_d;
	rtsc.m2 = opts->rtsc_m2;
	lssc.m1 = opts->lssc_m1;
	lssc.d  = opts->lssc_d;
	lssc.m2 = opts->lssc_m2;
	ulsc.m1 = opts->ulsc_m1;
	ulsc.d  = opts->ulsc_d;
	ulsc.m2 = opts->ulsc_m2;

	cl = hfsc_class_create(hif, &rtsc, &lssc, &ulsc,
	    parent, a->qlimit, opts->flags, a->qid);
	if (cl == NULL)
		return (ENOMEM);

	return (0);
}

int
hfsc_remove_queue(struct pf_altq *a)
{
	struct hfsc_if *hif;
	struct hfsc_class *cl;

	if ((hif = a->altq_disc) == NULL)
		return (EINVAL);

	if ((cl = clh_to_clp(hif, a->qid)) == NULL)
		return (EINVAL);

	return (hfsc_class_destroy(cl));
}

int
hfsc_getqstats(struct pf_altq *a, void *ubuf, int *nbytes)
{
	struct hfsc_if *hif;
	struct hfsc_class *cl;
	struct hfsc_classstats stats;
	int error = 0;

	if ((hif = altq_lookup(a->ifname, ALTQT_HFSC)) == NULL)
		return (EBADF);

	if ((cl = clh_to_clp(hif, a->qid)) == NULL)
		return (EINVAL);

	if (*nbytes < sizeof(stats))
		return (EINVAL);

	memset(&stats, 0, sizeof(stats));
	get_class_stats(&stats, cl);

	if ((error = copyout((void *)&stats, ubuf, sizeof(stats))) != 0)
		return (error);
	*nbytes = sizeof(stats);
	return (0);
}
#endif /* NPF > 0 */

/*
 * bring the interface back to the initial state by discarding
 * all the filters and classes except the root class.
 */
static int
hfsc_clear_interface(struct hfsc_if *hif)
{
	struct hfsc_class	*cl;

#ifdef ALTQ3_COMPAT
	/* free the filters for this interface */
	acc_discard_filters(&hif->hif_classifier, NULL, 1);
#endif

	/* clear out the classes */
	while (hif->hif_rootclass != NULL &&
	    (cl = hif->hif_rootclass->cl_children) != NULL) {
		/*
		 * remove the first leaf class found in the hierarchy
		 * then start over
		 */
		for (; cl != NULL; cl = hfsc_nextclass(cl)) {
			if (!is_a_parent_class(cl)) {
				(void)hfsc_class_destroy(cl);
				break;
			}
		}
	}

	return (0);
}
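
/*
 * note: the leaf-first loop above is needed because
 * hfsc_class_destroy() refuses to destroy a class that still has
 * children (it returns EBUSY), so the hierarchy is dismantled by
 * repeatedly removing a leaf and rescanning until only the root
 * class remains.
 */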

static int
hfsc_request(struct ifaltq *ifq, int req, void *arg)
{
	struct hfsc_if	*hif = (struct hfsc_if *)ifq->altq_disc;

	switch (req) {
	case ALTRQ_PURGE:
		hfsc_purge(hif);
		break;
	}
	return (0);
}

/* discard all the queued packets on the interface */
static void
hfsc_purge(struct hfsc_if *hif)
{
	struct hfsc_class *cl;

	for (cl = hif->hif_rootclass; cl != NULL; cl = hfsc_nextclass(cl))
		if (!qempty(cl->cl_q))
			hfsc_purgeq(cl);
	if (ALTQ_IS_ENABLED(hif->hif_ifq))
		hif->hif_ifq->ifq_len = 0;
}

struct hfsc_class *
hfsc_class_create(struct hfsc_if *hif, struct service_curve *rsc,
    struct service_curve *fsc, struct service_curve *usc,
    struct hfsc_class *parent, int qlimit, int flags, int qid)
{
	struct hfsc_class *cl, *p;
	int i, s;

	if (hif->hif_classes >= HFSC_MAX_CLASSES)
		return (NULL);

#ifndef ALTQ_RED
	if (flags & HFCF_RED) {
#ifdef ALTQ_DEBUG
		printf("hfsc_class_create: RED not configured for HFSC!\n");
#endif
		return (NULL);
	}
#endif

	cl = malloc(sizeof(struct hfsc_class), M_DEVBUF, M_WAITOK|M_ZERO);
	if (cl == NULL)
		return (NULL);

	cl->cl_q = malloc(sizeof(class_queue_t), M_DEVBUF, M_WAITOK|M_ZERO);
	if (cl->cl_q == NULL)
		goto err_ret;

	cl->cl_actc = actlist_alloc();
	if (cl->cl_actc == NULL)
		goto err_ret;

	if (qlimit == 0)
		qlimit = 50;  /* use default */
	qlimit(cl->cl_q) = qlimit;
	qtype(cl->cl_q) = Q_DROPTAIL;
	qlen(cl->cl_q) = 0;
	cl->cl_flags = flags;
#ifdef ALTQ_RED
	if (flags & (HFCF_RED|HFCF_RIO)) {
		int red_flags, red_pkttime;
		u_int m2;

		m2 = 0;
		if (rsc != NULL && rsc->m2 > m2)
			m2 = rsc->m2;
		if (fsc != NULL && fsc->m2 > m2)
			m2 = fsc->m2;
		if (usc != NULL && usc->m2 > m2)
			m2 = usc->m2;

		red_flags = 0;
		if (flags & HFCF_ECN)
			red_flags |= REDF_ECN;
#ifdef ALTQ_RIO
		if (flags & HFCF_CLEARDSCP)
			red_flags |= RIOF_CLEARDSCP;
#endif
		if (m2 < 8)
			red_pkttime = 1000 * 1000 * 1000; /* 1 sec */
		else
			red_pkttime = (int64_t)hif->hif_ifq->altq_ifp->if_mtu
				* 1000 * 1000 * 1000 / (m2 / 8);
		if (flags & HFCF_RED) {
			cl->cl_red = red_alloc(0, 0,
			    qlimit(cl->cl_q) * 10/100,
			    qlimit(cl->cl_q) * 30/100,
			    red_flags, red_pkttime);
			if (cl->cl_red != NULL)
				qtype(cl->cl_q) = Q_RED;
		}
#ifdef ALTQ_RIO
		else {
			cl->cl_red = (red_t *)rio_alloc(0, NULL,
			    red_flags, red_pkttime);
			if (cl->cl_red != NULL)
				qtype(cl->cl_q) = Q_RIO;
		}
#endif
	}
#endif /* ALTQ_RED */

	if (rsc != NULL && (rsc->m1 != 0 || rsc->m2 != 0)) {
		cl->cl_rsc = malloc(sizeof(struct internal_sc), M_DEVBUF,
		    M_WAITOK|M_ZERO);
		if (cl->cl_rsc == NULL)
			goto err_ret;
		sc2isc(rsc, cl->cl_rsc);
		rtsc_init(&cl->cl_deadline, cl->cl_rsc, 0, 0);
		rtsc_init(&cl->cl_eligible, cl->cl_rsc, 0, 0);
	}
	if (fsc != NULL && (fsc->m1 != 0 || fsc->m2 != 0)) {
		cl->cl_fsc = malloc(sizeof(struct internal_sc), M_DEVBUF,
		    M_WAITOK|M_ZERO);
		if (cl->cl_fsc == NULL)
			goto err_ret;
		sc2isc(fsc, cl->cl_fsc);
		rtsc_init(&cl->cl_virtual, cl->cl_fsc, 0, 0);
	}
	if (usc != NULL && (usc->m1 != 0 || usc->m2 != 0)) {
		cl->cl_usc = malloc(sizeof(struct internal_sc), M_DEVBUF,
		    M_WAITOK|M_ZERO);
		if (cl->cl_usc == NULL)
			goto err_ret;
		sc2isc(usc, cl->cl_usc);
		rtsc_init(&cl->cl_ulimit, cl->cl_usc, 0, 0);
	}

	cl->cl_id = hif->hif_classid++;
	cl->cl_handle = qid;
	cl->cl_hif = hif;
	cl->cl_parent = parent;

	s = splnet();
	hif->hif_classes++;

	/*
	 * find a free slot in the class table.  if the slot matching
	 * the lower bits of qid is free, use this slot.  otherwise,
	 * use the first free slot.
	 */
	i = qid % HFSC_MAX_CLASSES;
	if (hif->hif_class_tbl[i] == NULL)
		hif->hif_class_tbl[i] = cl;
	else {
		for (i = 0; i < HFSC_MAX_CLASSES; i++)
			if (hif->hif_class_tbl[i] == NULL) {
				hif->hif_class_tbl[i] = cl;
				break;
			}
		if (i == HFSC_MAX_CLASSES) {
			splx(s);
			goto err_ret;
		}
	}

	if (flags & HFCF_DEFAULTCLASS)
		hif->hif_defaultclass = cl;

	if (parent == NULL) {
		/* this is root class */
		hif->hif_rootclass = cl;
	} else {
		/* add this class to the children list of the parent */
		if ((p = parent->cl_children) == NULL)
			parent->cl_children = cl;
		else {
			while (p->cl_siblings != NULL)
				p = p->cl_siblings;
			p->cl_siblings = cl;
		}
	}
	splx(s);

	return (cl);

 err_ret:
	if (cl->cl_actc != NULL)
		actlist_destroy(cl->cl_actc);
	if (cl->cl_red != NULL) {
#ifdef ALTQ_RIO
		if (q_is_rio(cl->cl_q))
			rio_destroy((rio_t *)cl->cl_red);
#endif
#ifdef ALTQ_RED
		if (q_is_red(cl->cl_q))
			red_destroy(cl->cl_red);
#endif
	}
	if (cl->cl_fsc != NULL)
		free(cl->cl_fsc, M_DEVBUF);
	if (cl->cl_rsc != NULL)
		free(cl->cl_rsc, M_DEVBUF);
	if (cl->cl_usc != NULL)
		free(cl->cl_usc, M_DEVBUF);
	if (cl->cl_q != NULL)
		free(cl->cl_q, M_DEVBUF);
	free(cl, M_DEVBUF);
	return (NULL);
}

static int
hfsc_class_destroy(struct hfsc_class *cl)
{
	int i, s;

	if (cl == NULL)
		return (0);

	if (is_a_parent_class(cl))
		return (EBUSY);

	s = splnet();

#ifdef ALTQ3_COMPAT
	/* delete filters referencing this class */
	acc_discard_filters(&cl->cl_hif->hif_classifier, cl, 0);
#endif /* ALTQ3_COMPAT */

	if (!qempty(cl->cl_q))
		hfsc_purgeq(cl);

	if (cl->cl_parent == NULL) {
		/* this is root class */
	} else {
		struct hfsc_class *p = cl->cl_parent->cl_children;

		if (p == cl)
			cl->cl_parent->cl_children = cl->cl_siblings;
		else do {
			if (p->cl_siblings == cl) {
				p->cl_siblings = cl->cl_siblings;
				break;
			}
		} while ((p = p->cl_siblings) != NULL);
		ASSERT(p != NULL);
	}

	for (i = 0; i < HFSC_MAX_CLASSES; i++)
		if (cl->cl_hif->hif_class_tbl[i] == cl) {
			cl->cl_hif->hif_class_tbl[i] = NULL;
			break;
		}

	cl->cl_hif->hif_classes--;
	splx(s);

	actlist_destroy(cl->cl_actc);

	if (cl->cl_red != NULL) {
#ifdef ALTQ_RIO
		if (q_is_rio(cl->cl_q))
			rio_destroy((rio_t *)cl->cl_red);
#endif
#ifdef ALTQ_RED
		if (q_is_red(cl->cl_q))
			red_destroy(cl->cl_red);
#endif
	}

	if (cl == cl->cl_hif->hif_rootclass)
		cl->cl_hif->hif_rootclass = NULL;
	if (cl == cl->cl_hif->hif_defaultclass)
		cl->cl_hif->hif_defaultclass = NULL;

	if (cl->cl_usc != NULL)
		free(cl->cl_usc, M_DEVBUF);
	if (cl->cl_fsc != NULL)
		free(cl->cl_fsc, M_DEVBUF);
	if (cl->cl_rsc != NULL)
		free(cl->cl_rsc, M_DEVBUF);
	free(cl->cl_q, M_DEVBUF);
	free(cl, M_DEVBUF);

	return (0);
}

/*
 * hfsc_nextclass returns the next class in the tree.
 *   usage:
 *	for (cl = hif->hif_rootclass; cl != NULL; cl = hfsc_nextclass(cl))
 *		do_something;
 */
static struct hfsc_class *
hfsc_nextclass(struct hfsc_class *cl)
{
	if (cl->cl_children != NULL)
		cl = cl->cl_children;
	else if (cl->cl_siblings != NULL)
		cl = cl->cl_siblings;
	else {
		while ((cl = cl->cl_parent) != NULL)
			if (cl->cl_siblings) {
				cl = cl->cl_siblings;
				break;
			}
	}

	return (cl);
}
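
/*
 * note: the walk above is a pre-order traversal (a parent is visited
 * before its children), so a full scan starting from hif_rootclass
 * visits every class in the hierarchy exactly once.
 */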

/*
 * hfsc_enqueue is an enqueue function to be registered to
 * (*altq_enqueue) in struct ifaltq.
 */
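/*
 * note: a packet that carries no ALTQ class tag, or whose tag maps to
 * an internal (parent) class, is queued to the default class; if no
 * default class is configured, the packet is dropped.
 */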
static int
hfsc_enqueue(struct ifaltq *ifq, struct mbuf *m)
{
	struct altq_pktattr pktattr;
	struct hfsc_if	*hif = (struct hfsc_if *)ifq->altq_disc;
	struct hfsc_class *cl;
	struct m_tag *t;
	int len;

	/* grab class set by classifier */
	if ((m->m_flags & M_PKTHDR) == 0) {
		/* should not happen */
		printf("altq: packet for %s does not have pkthdr\n",
		    ifq->altq_ifp->if_xname);
		m_freem(m);
		return (ENOBUFS);
	}
	cl = NULL;
	if ((t = m_tag_find(m, PACKET_TAG_ALTQ_QID)) != NULL)
		cl = clh_to_clp(hif, ((struct altq_tag *)(t+1))->qid);
#ifdef ALTQ3_COMPAT
	else if ((ifq->altq_flags & ALTQF_CLASSIFY))
		cl = m->m_pkthdr.pattr_class;
#endif
	if (cl == NULL || is_a_parent_class(cl)) {
		cl = hif->hif_defaultclass;
		if (cl == NULL) {
			m_freem(m);
			return (ENOBUFS);
		}
	}
#ifdef ALTQ3_COMPAT
	if (m->m_pkthdr.pattr_af != AF_UNSPEC) {
		pktattr.pattr_class = m->m_pkthdr.pattr_class;
		pktattr.pattr_af = m->m_pkthdr.pattr_af;
		pktattr.pattr_hdr = m->m_pkthdr.pattr_hdr;

		cl->cl_pktattr = &pktattr;  /* save proto hdr used by ECN */
	} else
#endif
		cl->cl_pktattr = NULL;
	len = m_pktlen(m);
	if (hfsc_addq(cl, m) != 0) {
		/* drop occurred.  mbuf was freed in hfsc_addq. */
		PKTCNTR_ADD(&cl->cl_stats.drop_cnt, len);
		return (ENOBUFS);
	}
	IFQ_INC_LEN(ifq);
	cl->cl_hif->hif_packets++;

	/* successfully queued. */
	if (qlen(cl->cl_q) == 1)
		set_active(cl, m_pktlen(m));

	return (0);
}

/*
 * hfsc_dequeue is a dequeue function to be registered to
 * (*altq_dequeue) in struct ifaltq.
 *
 * note: ALTDQ_POLL returns the next packet without removing the packet
 *	from the queue.  ALTDQ_REMOVE is a normal dequeue operation.
 *	ALTDQ_REMOVE must return the same packet if called immediately
 *	after ALTDQ_POLL.
 */
static struct mbuf *
hfsc_dequeue(struct ifaltq *ifq, int op)
{
	struct hfsc_if	*hif = (struct hfsc_if *)ifq->altq_disc;
	struct hfsc_class *cl;
	struct mbuf *m;
	int len, next_len;
	int realtime = 0;
	u_int64_t cur_time;

	if (hif->hif_packets == 0)
		/* no packet in the tree */
		return (NULL);

	cur_time = read_machclk();

	if (op == ALTDQ_REMOVE && hif->hif_pollcache != NULL) {

		cl = hif->hif_pollcache;
		hif->hif_pollcache = NULL;
		/* check if the class was scheduled by real-time criteria */
		if (cl->cl_rsc != NULL)
			realtime = (cl->cl_e <= cur_time);
	} else {
		/*
		 * if there are eligible classes, use real-time criteria.
		 * find the class with the minimum deadline among
		 * the eligible classes.
		 */
		if ((cl = ellist_get_mindl(hif->hif_eligible, cur_time))
		    != NULL) {
			realtime = 1;
		} else {
#ifdef ALTQ_DEBUG
			int fits = 0;
#endif
			/*
			 * use link-sharing criteria
			 * get the class with the minimum vt in the hierarchy
			 */
			cl = hif->hif_rootclass;
			while (is_a_parent_class(cl)) {

				cl = actlist_firstfit(cl, cur_time);
				if (cl == NULL) {
#ifdef ALTQ_DEBUG
					if (fits > 0)
						printf("%d fit but none found\n",fits);
#endif
					return (NULL);
				}
				/*
				 * update parent's cl_cvtmin.
				 * don't update if the new vt is smaller.
				 */
				if (cl->cl_parent->cl_cvtmin < cl->cl_vt)
					cl->cl_parent->cl_cvtmin = cl->cl_vt;
#ifdef ALTQ_DEBUG
				fits++;
#endif
			}
		}

		if (op == ALTDQ_POLL) {
			hif->hif_pollcache = cl;
			m = hfsc_pollq(cl);
			return (m);
		}
	}

	m = hfsc_getq(cl);
	if (m == NULL)
		panic("hfsc_dequeue:");
	len = m_pktlen(m);
	cl->cl_hif->hif_packets--;
	IFQ_DEC_LEN(ifq);
	PKTCNTR_ADD(&cl->cl_stats.xmit_cnt, len);

	update_vf(cl, len, cur_time);
	if (realtime)
		cl->cl_cumul += len;

	if (!qempty(cl->cl_q)) {
		if (cl->cl_rsc != NULL) {
			/* update ed */
			next_len = m_pktlen(qhead(cl->cl_q));

			if (realtime)
				update_ed(cl, next_len);
			else
				update_d(cl, next_len);
		}
	} else {
		/* the class becomes passive */
		set_passive(cl);
	}

	return (m);
}
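
/*
 * note: cl_cumul is charged only for bytes sent under the real-time
 * criterion, so the eligible and deadline curves (indexed by cl_cumul
 * in init_ed()/update_ed()) track the real-time service the class has
 * actually received, independent of any link-sharing service.
 */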

static int
hfsc_addq(struct hfsc_class *cl, struct mbuf *m)
{

#ifdef ALTQ_RIO
	if (q_is_rio(cl->cl_q))
		return rio_addq((rio_t *)cl->cl_red, cl->cl_q,
				m, cl->cl_pktattr);
#endif
#ifdef ALTQ_RED
	if (q_is_red(cl->cl_q))
		return red_addq(cl->cl_red, cl->cl_q, m, cl->cl_pktattr);
#endif
	if (qlen(cl->cl_q) >= qlimit(cl->cl_q)) {
		m_freem(m);
		return (-1);
	}

	if (cl->cl_flags & HFCF_CLEARDSCP)
		write_dsfield(m, cl->cl_pktattr, 0);

	_addq(cl->cl_q, m);

	return (0);
}

static struct mbuf *
hfsc_getq(struct hfsc_class *cl)
{
#ifdef ALTQ_RIO
	if (q_is_rio(cl->cl_q))
		return rio_getq((rio_t *)cl->cl_red, cl->cl_q);
#endif
#ifdef ALTQ_RED
	if (q_is_red(cl->cl_q))
		return red_getq(cl->cl_red, cl->cl_q);
#endif
	return _getq(cl->cl_q);
}

static struct mbuf *
hfsc_pollq(struct hfsc_class *cl)
{
	return qhead(cl->cl_q);
}

static void
hfsc_purgeq(struct hfsc_class *cl)
{
	struct mbuf *m;

	if (qempty(cl->cl_q))
		return;

	while ((m = _getq(cl->cl_q)) != NULL) {
		PKTCNTR_ADD(&cl->cl_stats.drop_cnt, m_pktlen(m));
		m_freem(m);
		cl->cl_hif->hif_packets--;
		IFQ_DEC_LEN(cl->cl_hif->hif_ifq);
	}
	ASSERT(qlen(cl->cl_q) == 0);

	update_vf(cl, 0, 0);	/* remove cl from the actlist */
	set_passive(cl);
}

static void
set_active(struct hfsc_class *cl, int len)
{
	if (cl->cl_rsc != NULL)
		init_ed(cl, len);
	if (cl->cl_fsc != NULL)
		init_vf(cl, len);

	cl->cl_stats.period++;
}

static void
set_passive(struct hfsc_class *cl)
{
	if (cl->cl_rsc != NULL)
		ellist_remove(cl);

	/*
	 * the actlist is now handled in update_vf(), so update_vf(cl, 0, 0)
	 * must be called explicitly to remove a class from the actlist
	 */
}

static void
init_ed(struct hfsc_class *cl, int next_len)
{
	u_int64_t cur_time;

	cur_time = read_machclk();

	/* update the deadline curve */
	rtsc_min(&cl->cl_deadline, cl->cl_rsc, cur_time, cl->cl_cumul);

	/*
	 * update the eligible curve.
	 * for concave, it is equal to the deadline curve.
	 * for convex, it is a linear curve with slope m2.
	 */
	cl->cl_eligible = cl->cl_deadline;
	if (cl->cl_rsc->sm1 <= cl->cl_rsc->sm2) {
		cl->cl_eligible.dx = 0;
		cl->cl_eligible.dy = 0;
	}

	/* compute e and d */
	cl->cl_e = rtsc_y2x(&cl->cl_eligible, cl->cl_cumul);
	cl->cl_d = rtsc_y2x(&cl->cl_deadline, cl->cl_cumul + next_len);

	ellist_insert(cl);
}

static void
update_ed(struct hfsc_class *cl, int next_len)
{
	cl->cl_e = rtsc_y2x(&cl->cl_eligible, cl->cl_cumul);
	cl->cl_d = rtsc_y2x(&cl->cl_deadline, cl->cl_cumul + next_len);

	ellist_update(cl);
}

static void
update_d(struct hfsc_class *cl, int next_len)
{
	cl->cl_d = rtsc_y2x(&cl->cl_deadline, cl->cl_cumul + next_len);
}

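/*
 * init_vf sets up the virtual-time state when a class becomes active:
 * it walks up the hierarchy, activating each ancestor whose set of
 * active children was previously empty, and picks an initial vt for
 * each newly activated class from its siblings' virtual times.
 */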
static void
init_vf(struct hfsc_class *cl, int len)
{
	struct hfsc_class *max_cl, *p;
	u_int64_t vt, f, cur_time;
	int go_active;

	cur_time = 0;
	go_active = 1;
	for ( ; cl->cl_parent != NULL; cl = cl->cl_parent) {

		if (go_active && cl->cl_nactive++ == 0)
			go_active = 1;
		else
			go_active = 0;

		if (go_active) {
			max_cl = actlist_last(cl->cl_parent->cl_actc);
			if (max_cl != NULL) {
				/*
				 * set vt to the average of the min and max
				 * classes.  if the parent's period didn't
				 * change, don't decrease vt of the class.
				 */
				vt = max_cl->cl_vt;
				if (cl->cl_parent->cl_cvtmin != 0)
					vt = (cl->cl_parent->cl_cvtmin + vt)/2;

				if (cl->cl_parent->cl_vtperiod !=
				    cl->cl_parentperiod || vt > cl->cl_vt)
					cl->cl_vt = vt;
			} else {
				/*
				 * first child for a new parent backlog period.
				 * add parent's cvtmax to vtoff of children
				 * to make a new vt (vtoff + vt) larger than
				 * the vt in the last period for all children.
				 */
				vt = cl->cl_parent->cl_cvtmax;
				for (p = cl->cl_parent->cl_children; p != NULL;
				     p = p->cl_siblings)
					p->cl_vtoff += vt;
				cl->cl_vt = 0;
				cl->cl_parent->cl_cvtmax = 0;
				cl->cl_parent->cl_cvtmin = 0;
			}
			cl->cl_initvt = cl->cl_vt;

			/* update the virtual curve */
			vt = cl->cl_vt + cl->cl_vtoff;
			rtsc_min(&cl->cl_virtual, cl->cl_fsc, vt, cl->cl_total);
			if (cl->cl_virtual.x == vt) {
				cl->cl_virtual.x -= cl->cl_vtoff;
				cl->cl_vtoff = 0;
			}
			cl->cl_vtadj = 0;

			cl->cl_vtperiod++;  /* increment vt period */
			cl->cl_parentperiod = cl->cl_parent->cl_vtperiod;
			if (cl->cl_parent->cl_nactive == 0)
				cl->cl_parentperiod++;
			cl->cl_f = 0;

			actlist_insert(cl);

			if (cl->cl_usc != NULL) {
				/* class has upper limit curve */
				if (cur_time == 0)
					cur_time = read_machclk();

				/* update the ulimit curve */
				rtsc_min(&cl->cl_ulimit, cl->cl_usc, cur_time,
				    cl->cl_total);
				/* compute myf */
				cl->cl_myf = rtsc_y2x(&cl->cl_ulimit,
				    cl->cl_total);
				cl->cl_myfadj = 0;
			}
		}

		if (cl->cl_myf > cl->cl_cfmin)
			f = cl->cl_myf;
		else
			f = cl->cl_cfmin;
		if (f != cl->cl_f) {
			cl->cl_f = f;
			update_cfmin(cl->cl_parent);
		}
	}
}

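/*
 * update_vf charges len bytes to the class and each of its ancestors
 * and recomputes their virtual times and fit times.  when the class's
 * queue has gone empty, each class on the path whose cl_nactive count
 * drops to zero goes passive and is removed from its parent's actlist.
 */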
static void
update_vf(struct hfsc_class *cl, int len, u_int64_t cur_time)
{
	u_int64_t f, myf_bound, delta;
	int go_passive;

	go_passive = qempty(cl->cl_q);

	for (; cl->cl_parent != NULL; cl = cl->cl_parent) {

		cl->cl_total += len;

		if (cl->cl_fsc == NULL || cl->cl_nactive == 0)
			continue;

		if (go_passive && --cl->cl_nactive == 0)
			go_passive = 1;
		else
			go_passive = 0;

		if (go_passive) {
			/* no more active child, going passive */

			/* update cvtmax of the parent class */
			if (cl->cl_vt > cl->cl_parent->cl_cvtmax)
				cl->cl_parent->cl_cvtmax = cl->cl_vt;

			/* remove this class from the vt list */
			actlist_remove(cl);

			update_cfmin(cl->cl_parent);

			continue;
		}

		/*
		 * update vt and f
		 */
		cl->cl_vt = rtsc_y2x(&cl->cl_virtual, cl->cl_total)
		    - cl->cl_vtoff + cl->cl_vtadj;

		/*
		 * if vt of the class is smaller than cvtmin,
		 * the class was skipped in the past due to non-fit.
		 * if so, we need to adjust vtadj.
		 */
		if (cl->cl_vt < cl->cl_parent->cl_cvtmin) {
			cl->cl_vtadj += cl->cl_parent->cl_cvtmin - cl->cl_vt;
			cl->cl_vt = cl->cl_parent->cl_cvtmin;
		}

		/* update the vt list */
		actlist_update(cl);

		if (cl->cl_usc != NULL) {
			cl->cl_myf = cl->cl_myfadj
			    + rtsc_y2x(&cl->cl_ulimit, cl->cl_total);

			/*
			 * if myf lags behind by more than one clock tick
			 * from the current time, adjust myfadj to prevent
			 * a rate-limited class from going greedy.
			 * in a steady state under rate-limiting, myf
			 * fluctuates within one clock tick.
			 */
			myf_bound = cur_time - machclk_per_tick;
			if (cl->cl_myf < myf_bound) {
				delta = cur_time - cl->cl_myf;
				cl->cl_myfadj += delta;
				cl->cl_myf += delta;
			}
		}

		/* cl_f is max(cl_myf, cl_cfmin) */
		if (cl->cl_myf > cl->cl_cfmin)
			f = cl->cl_myf;
		else
			f = cl->cl_cfmin;
		if (f != cl->cl_f) {
			cl->cl_f = f;
			update_cfmin(cl->cl_parent);
		}
	}
}

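/*
 * update_cfmin recomputes cl_cfmin, the minimum fit time among the
 * active children of cl; the scan can stop early when a child with
 * f == 0 is found, since zero is already the smallest possible value.
 */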
static void
update_cfmin(struct hfsc_class *cl)
{
	struct hfsc_class *p;
	u_int64_t cfmin;

	if (TAILQ_EMPTY(cl->cl_actc)) {
		cl->cl_cfmin = 0;
		return;
	}
	cfmin = HT_INFINITY;
	TAILQ_FOREACH(p, cl->cl_actc, cl_actlist) {
		if (p->cl_f == 0) {
			cl->cl_cfmin = 0;
			return;
		}
		if (p->cl_f < cfmin)
			cfmin = p->cl_f;
	}
	cl->cl_cfmin = cfmin;
}

/*
 * TAILQ based ellist and actlist implementation
 * (ion wanted to make a calendar queue based implementation)
 */
/*
 * the eligible list holds the backlogged classes, sorted by their
 * eligible times.  there is one eligible list per interface.
 */

static ellist_t *
ellist_alloc(void)
{
	ellist_t *head;

	head = malloc(sizeof(ellist_t), M_DEVBUF, M_WAITOK);
	TAILQ_INIT(head);
	return (head);
}

static void
ellist_destroy(ellist_t *head)
{
	free(head, M_DEVBUF);
}

static void
ellist_insert(struct hfsc_class *cl)
{
	struct hfsc_if	*hif = cl->cl_hif;
	struct hfsc_class *p;

	/* check the last entry first */
	if ((p = TAILQ_LAST(hif->hif_eligible, _eligible)) == NULL ||
	    p->cl_e <= cl->cl_e) {
		TAILQ_INSERT_TAIL(hif->hif_eligible, cl, cl_ellist);
		return;
	}

	TAILQ_FOREACH(p, hif->hif_eligible, cl_ellist) {
		if (cl->cl_e < p->cl_e) {
			TAILQ_INSERT_BEFORE(p, cl, cl_ellist);
			return;
		}
	}
	ASSERT(0); /* should not reach here */
}

static void
ellist_remove(struct hfsc_class *cl)
{
	struct hfsc_if	*hif = cl->cl_hif;

	TAILQ_REMOVE(hif->hif_eligible, cl, cl_ellist);
}

static void
ellist_update(struct hfsc_class *cl)
{
	struct hfsc_if	*hif = cl->cl_hif;
	struct hfsc_class *p, *last;

	/*
	 * the eligible time of a class increases monotonically.
	 * if the next entry has a larger eligible time, nothing to do.
	 */
	p = TAILQ_NEXT(cl, cl_ellist);
	if (p == NULL || cl->cl_e <= p->cl_e)
		return;

	/* check the last entry */
	last = TAILQ_LAST(hif->hif_eligible, _eligible);
	ASSERT(last != NULL);
	if (last->cl_e <= cl->cl_e) {
		TAILQ_REMOVE(hif->hif_eligible, cl, cl_ellist);
		TAILQ_INSERT_TAIL(hif->hif_eligible, cl, cl_ellist);
		return;
	}

	/*
	 * the new position must be between the next entry
	 * and the last entry
	 */
	while ((p = TAILQ_NEXT(p, cl_ellist)) != NULL) {
		if (cl->cl_e < p->cl_e) {
			TAILQ_REMOVE(hif->hif_eligible, cl, cl_ellist);
			TAILQ_INSERT_BEFORE(p, cl, cl_ellist);
			return;
		}
	}
	ASSERT(0); /* should not reach here */
}

/* find the class with the minimum deadline among the eligible classes */
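/*
 * (the list is sorted by eligible time, so the scan below can stop at
 * the first class whose eligible time is still in the future)
 */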
struct hfsc_class *
ellist_get_mindl(ellist_t *head, u_int64_t cur_time)
{
	struct hfsc_class *p, *cl = NULL;

	TAILQ_FOREACH(p, head, cl_ellist) {
		if (p->cl_e > cur_time)
			break;
		if (cl == NULL || p->cl_d < cl->cl_d)
			cl = p;
	}
	return (cl);
}

/*
 * an active children list holds the backlogged child classes, sorted
 * by their virtual time.
 * each intermediate class has one active children list.
 */
static actlist_t *
actlist_alloc(void)
{
	actlist_t *head;

	head = malloc(sizeof(actlist_t), M_DEVBUF, M_WAITOK);
	TAILQ_INIT(head);
	return (head);
}

static void
actlist_destroy(actlist_t *head)
{
	free(head, M_DEVBUF);
}

static void
actlist_insert(struct hfsc_class *cl)
{
	struct hfsc_class *p;

	/* check the last entry first */
	if ((p = TAILQ_LAST(cl->cl_parent->cl_actc, _active)) == NULL
	    || p->cl_vt <= cl->cl_vt) {
		TAILQ_INSERT_TAIL(cl->cl_parent->cl_actc, cl, cl_actlist);
		return;
	}

	TAILQ_FOREACH(p, cl->cl_parent->cl_actc, cl_actlist) {
		if (cl->cl_vt < p->cl_vt) {
			TAILQ_INSERT_BEFORE(p, cl, cl_actlist);
			return;
		}
	}
	ASSERT(0); /* should not reach here */
}

static void
actlist_remove(struct hfsc_class *cl)
{
	TAILQ_REMOVE(cl->cl_parent->cl_actc, cl, cl_actlist);
}

static void
actlist_update(struct hfsc_class *cl)
{
	struct hfsc_class *p, *last;

	/*
	 * the virtual time of a class increases monotonically during its
	 * backlogged period.
	 * if the next entry has a larger virtual time, nothing to do.
	 */
	p = TAILQ_NEXT(cl, cl_actlist);
	if (p == NULL || cl->cl_vt < p->cl_vt)
		return;

	/* check the last entry */
	last = TAILQ_LAST(cl->cl_parent->cl_actc, _active);
	ASSERT(last != NULL);
	if (last->cl_vt <= cl->cl_vt) {
		TAILQ_REMOVE(cl->cl_parent->cl_actc, cl, cl_actlist);
		TAILQ_INSERT_TAIL(cl->cl_parent->cl_actc, cl, cl_actlist);
		return;
	}

	/*
	 * the new position must be between the next entry
	 * and the last entry
	 */
	while ((p = TAILQ_NEXT(p, cl_actlist)) != NULL) {
		if (cl->cl_vt < p->cl_vt) {
			TAILQ_REMOVE(cl->cl_parent->cl_actc, cl, cl_actlist);
			TAILQ_INSERT_BEFORE(p, cl, cl_actlist);
			return;
		}
	}
	ASSERT(0); /* should not reach here */
}

static struct hfsc_class *
actlist_firstfit(struct hfsc_class *cl, u_int64_t cur_time)
{
	struct hfsc_class *p;

	TAILQ_FOREACH(p, cl->cl_actc, cl_actlist) {
		if (p->cl_f <= cur_time)
			return (p);
	}
	return (NULL);
}

/*
 * service curve support functions
 *
 *  external service curve parameters
 *	m: bits/sec
 *	d: msec
 *  internal service curve parameters
 *	sm: (bytes/tsc_interval) << SM_SHIFT
 *	ism: (tsc_count/byte) << ISM_SHIFT
 *	dx: tsc_count
 *
 * SM_SHIFT and ISM_SHIFT are scaled in order to keep effective digits.
 * we should be able to handle 100K-1Gbps linkspeed with 200MHz-1GHz CPU
 * speed.  SM_SHIFT and ISM_SHIFT are selected to have at least 3 effective
 * digits in decimal using the following table.
 *
 *  bits/sec    100Kbps     1Mbps     10Mbps     100Mbps    1Gbps
 *  ----------+-------------------------------------------------------
 *  bytes/nsec  12.5e-6    125e-6     1250e-6    12500e-6   125000e-6
 *  sm(500MHz)  25.0e-6    250e-6     2500e-6    25000e-6   250000e-6
 *  sm(200MHz)  62.5e-6    625e-6     6250e-6    62500e-6   625000e-6
 *
 *  nsec/byte   80000      8000       800        80         8
 *  ism(500MHz) 40000      4000       400        40         4
 *  ism(200MHz) 16000      1600       160        16         1.6
 */
#define	SM_SHIFT	24
#define	ISM_SHIFT	10

#define	SM_MASK		((1LL << SM_SHIFT) - 1)
#define	ISM_MASK	((1LL << ISM_SHIFT) - 1)

static inline u_int64_t
seg_x2y(u_int64_t x, u_int64_t sm)
{
	u_int64_t y;

	/*
	 * compute
	 *	y = x * sm >> SM_SHIFT
	 * but split x into its upper and lower bits to avoid overflow
	 */
	y = (x >> SM_SHIFT) * sm + (((x & SM_MASK) * sm) >> SM_SHIFT);
	return (y);
}
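
/*
 * the split works because, up to rounding in the low-order bits,
 *	(x * sm) >> SM_SHIFT ==
 *	    ((x >> SM_SHIFT) * sm) + (((x & SM_MASK) * sm) >> SM_SHIFT)
 * and each partial product stays within 64 bits.  seg_y2x() below
 * applies the same decomposition with ISM_SHIFT.
 */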

static inline u_int64_t
seg_y2x(u_int64_t y, u_int64_t ism)
{
	u_int64_t x;

	if (y == 0)
		x = 0;
	else if (ism == HT_INFINITY)
		x = HT_INFINITY;
	else {
		x = (y >> ISM_SHIFT) * ism
		    + (((y & ISM_MASK) * ism) >> ISM_SHIFT);
	}
	return (x);
}

static inline u_int64_t
m2sm(u_int m)
{
	u_int64_t sm;

	sm = ((u_int64_t)m << SM_SHIFT) / 8 / machclk_freq;
	return (sm);
}
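
/*
 * worked example (informational): at m = 10Mbps and machclk_freq =
 * 500MHz, a class is served 10^7 / 8 / (5 * 10^8) = 2500e-6 bytes per
 * clock tick, so m2sm(10000000) yields 2500e-6 << SM_SHIFT =~ 41943,
 * matching the sm(500MHz) row of the table above.
 */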

static inline u_int64_t
m2ism(u_int m)
{
	u_int64_t ism;

	if (m == 0)
		ism = HT_INFINITY;
	else
		ism = ((u_int64_t)machclk_freq << ISM_SHIFT) * 8 / m;
	return (ism);
}

static inline u_int64_t
d2dx(u_int d)
{
	u_int64_t dx;

	dx = ((u_int64_t)d * machclk_freq) / 1000;
	return (dx);
}

static u_int
sm2m(u_int64_t sm)
{
	u_int64_t m;

	m = (sm * 8 * machclk_freq) >> SM_SHIFT;
	return ((u_int)m);
}

static u_int
dx2d(u_int64_t dx)
{
	u_int64_t d;

	d = dx * 1000 / machclk_freq;
	return ((u_int)d);
}

static void
sc2isc(struct service_curve *sc, struct internal_sc *isc)
{
	isc->sm1 = m2sm(sc->m1);
	isc->ism1 = m2ism(sc->m1);
	isc->dx = d2dx(sc->d);
	isc->dy = seg_x2y(isc->dx, isc->sm1);
	isc->sm2 = m2sm(sc->m2);
	isc->ism2 = m2ism(sc->m2);
}
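
/*
 * an internal_sc is the static two-segment description of a service
 * curve (slopes sm1/sm2 with break offset (dx, dy)); a runtime_sc is
 * the same shape anchored at a point (x, y) in (machclk time, bytes)
 * space, which rtsc_init() below sets up.
 */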

/*
 * initialize the runtime service curve with the given internal
 * service curve starting at (x, y).
 */
static void
rtsc_init(struct runtime_sc *rtsc, struct internal_sc * isc, u_int64_t x,
    u_int64_t y)
{
	rtsc->x =	x;
	rtsc->y =	y;
	rtsc->sm1 =	isc->sm1;
	rtsc->ism1 =	isc->ism1;
	rtsc->dx =	isc->dx;
	rtsc->dy =	isc->dy;
	rtsc->sm2 =	isc->sm2;
	rtsc->ism2 =	isc->ism2;
}

/*
 * calculate the x-projection of the runtime service curve for the
 * given y value, i.e., the time at which the curve reaches amount y
 */
static u_int64_t
rtsc_y2x(struct runtime_sc *rtsc, u_int64_t y)
{
	u_int64_t	x;

	if (y < rtsc->y)
		x = rtsc->x;
	else if (y <= rtsc->y + rtsc->dy) {
		/* x belongs to the 1st segment */
		if (rtsc->dy == 0)
			x = rtsc->x + rtsc->dx;
		else
			x = rtsc->x + seg_y2x(y - rtsc->y, rtsc->ism1);
	} else {
		/* x belongs to the 2nd segment */
		x = rtsc->x + rtsc->dx
		    + seg_y2x(y - rtsc->y - rtsc->dy, rtsc->ism2);
	}
	return (x);
}

static u_int64_t
rtsc_x2y(struct runtime_sc *rtsc, u_int64_t x)
{
	u_int64_t	y;

	if (x <= rtsc->x)
		y = rtsc->y;
	else if (x <= rtsc->x + rtsc->dx)
		/* y belongs to the 1st segment */
		y = rtsc->y + seg_x2y(x - rtsc->x, rtsc->sm1);
	else
		/* y belongs to the 2nd segment */
		y = rtsc->y + rtsc->dy
		    + seg_x2y(x - rtsc->x - rtsc->dx, rtsc->sm2);
	return (y);
}

/*
 * update the runtime service curve by taking the minimum of the current
 * runtime service curve and the service curve starting at (x, y).
 */
static void
rtsc_min(struct runtime_sc *rtsc, struct internal_sc *isc, u_int64_t x,
    u_int64_t y)
{
	u_int64_t	y1, y2, dx, dy;

	if (isc->sm1 <= isc->sm2) {
		/* service curve is convex */
		y1 = rtsc_x2y(rtsc, x);
		if (y1 < y)
			/* the current rtsc is smaller */
			return;
		rtsc->x = x;
		rtsc->y = y;
		return;
	}

	/*
	 * service curve is concave
	 * compute the two y values of the current rtsc
	 *	y1: at x
	 *	y2: at (x + dx)
	 */
	y1 = rtsc_x2y(rtsc, x);
	if (y1 <= y) {
		/* rtsc is below isc, no change to rtsc */
		return;
	}

	y2 = rtsc_x2y(rtsc, x + isc->dx);
	if (y2 >= y + isc->dy) {
		/* rtsc is above isc, replace rtsc by isc */
		rtsc->x = x;
		rtsc->y = y;
		rtsc->dx = isc->dx;
		rtsc->dy = isc->dy;
		return;
	}

	/*
	 * the two curves intersect
	 * compute the offsets (dx, dy) using the reverse
	 * function of seg_x2y()
	 *	seg_x2y(dx, sm1) == seg_x2y(dx, sm2) + (y1 - y)
	 */
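	/*
	 * solving for dx (informational): seg_x2y(dx, sm) is
	 * dx * sm >> SM_SHIFT, so the equation above reduces to
	 *	dx * (sm1 - sm2) >> SM_SHIFT == y1 - y
	 * which gives the expression below.
	 */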
	dx = ((y1 - y) << SM_SHIFT) / (isc->sm1 - isc->sm2);
	/*
	 * check if (x, y1) belongs to the 1st segment of rtsc.
	 * if so, add the offset.
	 */
	if (rtsc->x + rtsc->dx > x)
		dx += rtsc->x + rtsc->dx - x;
	dy = seg_x2y(dx, isc->sm1);

	rtsc->x = x;
	rtsc->y = y;
	rtsc->dx = dx;
	rtsc->dy = dy;
	return;
}

static void
get_class_stats(struct hfsc_classstats *sp, struct hfsc_class *cl)
{
	sp->class_id = cl->cl_id;
	sp->class_handle = cl->cl_handle;

	if (cl->cl_rsc != NULL) {
		sp->rsc.m1 = sm2m(cl->cl_rsc->sm1);
		sp->rsc.d = dx2d(cl->cl_rsc->dx);
		sp->rsc.m2 = sm2m(cl->cl_rsc->sm2);
	} else {
		sp->rsc.m1 = 0;
		sp->rsc.d = 0;
		sp->rsc.m2 = 0;
	}
	if (cl->cl_fsc != NULL) {
		sp->fsc.m1 = sm2m(cl->cl_fsc->sm1);
		sp->fsc.d = dx2d(cl->cl_fsc->dx);
		sp->fsc.m2 = sm2m(cl->cl_fsc->sm2);
	} else {
		sp->fsc.m1 = 0;
		sp->fsc.d = 0;
		sp->fsc.m2 = 0;
	}
	if (cl->cl_usc != NULL) {
		sp->usc.m1 = sm2m(cl->cl_usc->sm1);
		sp->usc.d = dx2d(cl->cl_usc->dx);
		sp->usc.m2 = sm2m(cl->cl_usc->sm2);
	} else {
		sp->usc.m1 = 0;
		sp->usc.d = 0;
		sp->usc.m2 = 0;
	}

	sp->total = cl->cl_total;
	sp->cumul = cl->cl_cumul;

	sp->d = cl->cl_d;
	sp->e = cl->cl_e;
	sp->vt = cl->cl_vt;
	sp->f = cl->cl_f;

	sp->initvt = cl->cl_initvt;
	sp->vtperiod = cl->cl_vtperiod;
	sp->parentperiod = cl->cl_parentperiod;
	sp->nactive = cl->cl_nactive;
	sp->vtoff = cl->cl_vtoff;
	sp->cvtmax = cl->cl_cvtmax;
	sp->myf = cl->cl_myf;
	sp->cfmin = cl->cl_cfmin;
	sp->cvtmin = cl->cl_cvtmin;
	sp->myfadj = cl->cl_myfadj;
	sp->vtadj = cl->cl_vtadj;

	sp->cur_time = read_machclk();
	sp->machclk_freq = machclk_freq;

	sp->qlength = qlen(cl->cl_q);
	sp->qlimit = qlimit(cl->cl_q);
	sp->xmit_cnt = cl->cl_stats.xmit_cnt;
	sp->drop_cnt = cl->cl_stats.drop_cnt;
	sp->period = cl->cl_stats.period;

	sp->qtype = qtype(cl->cl_q);
#ifdef ALTQ_RED
	if (q_is_red(cl->cl_q))
		red_getstats(cl->cl_red, &sp->red[0]);
#endif
#ifdef ALTQ_RIO
	if (q_is_rio(cl->cl_q))
		rio_getstats((rio_t *)cl->cl_red, &sp->red[0]);
#endif
}

/* convert a class handle to the corresponding class pointer */
static struct hfsc_class *
clh_to_clp(struct hfsc_if *hif, u_int32_t chandle)
{
	int i;
	struct hfsc_class *cl;

	if (chandle == 0)
		return (NULL);
	/*
	 * first, optimistically try the slot matching the lower bits of
	 * the handle; if that fails, fall back to a linear table search.
	 */
	i = chandle % HFSC_MAX_CLASSES;
	if ((cl = hif->hif_class_tbl[i]) != NULL && cl->cl_handle == chandle)
		return (cl);
	for (i = 0; i < HFSC_MAX_CLASSES; i++)
		if ((cl = hif->hif_class_tbl[i]) != NULL &&
		    cl->cl_handle == chandle)
			return (cl);
	return (NULL);
}

#ifdef ALTQ3_COMPAT
static struct hfsc_if *
hfsc_attach(struct ifaltq *ifq, u_int bandwidth)
{
	struct hfsc_if *hif;

	hif = malloc(sizeof(struct hfsc_if), M_DEVBUF, M_WAITOK|M_ZERO);
	if (hif == NULL)
		return (NULL);

	hif->hif_eligible = ellist_alloc();
	if (hif->hif_eligible == NULL) {
		free(hif, M_DEVBUF);
		return NULL;
	}

	hif->hif_ifq = ifq;

	/* add this state to the hfsc list */
	hif->hif_next = hif_list;
	hif_list = hif;

	return (hif);
}

static void
hfsc_detach(struct hfsc_if *hif)
{
	(void)hfsc_clear_interface(hif);
	(void)hfsc_class_destroy(hif->hif_rootclass);

	/* remove this interface from the hif list */
	if (hif_list == hif)
		hif_list = hif->hif_next;
	else {
		struct hfsc_if *h;

		for (h = hif_list; h != NULL; h = h->hif_next)
			if (h->hif_next == hif) {
				h->hif_next = hif->hif_next;
				break;
			}
		ASSERT(h != NULL);
	}

	ellist_destroy(hif->hif_eligible);

	free(hif, M_DEVBUF);
}

static int
hfsc_class_modify(struct hfsc_class *cl, struct service_curve *rsc,
    struct service_curve *fsc, struct service_curve *usc)
{
	struct internal_sc *rsc_tmp, *fsc_tmp, *usc_tmp;
	u_int64_t cur_time;
	int s;

	rsc_tmp = fsc_tmp = usc_tmp = NULL;
	if (rsc != NULL && (rsc->m1 != 0 || rsc->m2 != 0) &&
	    cl->cl_rsc == NULL) {
		rsc_tmp = malloc(sizeof(struct internal_sc), M_DEVBUF,
		    M_WAITOK);
		if (rsc_tmp == NULL)
			return (ENOMEM);
	}
	if (fsc != NULL && (fsc->m1 != 0 || fsc->m2 != 0) &&
	    cl->cl_fsc == NULL) {
		fsc_tmp = malloc(sizeof(struct internal_sc), M_DEVBUF,
		    M_WAITOK);
		if (fsc_tmp == NULL)
			return (ENOMEM);
	}
	if (usc != NULL && (usc->m1 != 0 || usc->m2 != 0) &&
	    cl->cl_usc == NULL) {
		usc_tmp = malloc(sizeof(struct internal_sc), M_DEVBUF,
		    M_WAITOK);
		if (usc_tmp == NULL)
			return (ENOMEM);
	}

	cur_time = read_machclk();
	s = splnet();

	if (rsc != NULL) {
		if (rsc->m1 == 0 && rsc->m2 == 0) {
			if (cl->cl_rsc != NULL) {
				if (!qempty(cl->cl_q))
					hfsc_purgeq(cl);
				free(cl->cl_rsc, M_DEVBUF);
				cl->cl_rsc = NULL;
			}
		} else {
			if (cl->cl_rsc == NULL)
				cl->cl_rsc = rsc_tmp;
			sc2isc(rsc, cl->cl_rsc);
			rtsc_init(&cl->cl_deadline, cl->cl_rsc, cur_time,
			    cl->cl_cumul);
			cl->cl_eligible = cl->cl_deadline;
			if (cl->cl_rsc->sm1 <= cl->cl_rsc->sm2) {
				cl->cl_eligible.dx = 0;
				cl->cl_eligible.dy = 0;
			}
		}
	}

	if (fsc != NULL) {
		if (fsc->m1 == 0 && fsc->m2 == 0) {
			if (cl->cl_fsc != NULL) {
				if (!qempty(cl->cl_q))
					hfsc_purgeq(cl);
				free(cl->cl_fsc, M_DEVBUF);
				cl->cl_fsc = NULL;
			}
		} else {
			if (cl->cl_fsc == NULL)
				cl->cl_fsc = fsc_tmp;
			sc2isc(fsc, cl->cl_fsc);
			rtsc_init(&cl->cl_virtual, cl->cl_fsc, cl->cl_vt,
			    cl->cl_total);
		}
	}

	if (usc != NULL) {
		if (usc->m1 == 0 && usc->m2 == 0) {
			if (cl->cl_usc != NULL) {
				free(cl->cl_usc, M_DEVBUF);
				cl->cl_usc = NULL;
				cl->cl_myf = 0;
			}
		} else {
			if (cl->cl_usc == NULL)
				cl->cl_usc = usc_tmp;
			sc2isc(usc, cl->cl_usc);
			rtsc_init(&cl->cl_ulimit, cl->cl_usc, cur_time,
			    cl->cl_total);
		}
	}

	if (!qempty(cl->cl_q)) {
		if (cl->cl_rsc != NULL)
			update_ed(cl, m_pktlen(qhead(cl->cl_q)));
		if (cl->cl_fsc != NULL)
			update_vf(cl, 0, cur_time);
		/* is this enough? */
	}

	splx(s);

	return (0);
}

/*
 * hfsc device interface
 */
int
hfscopen(dev_t dev, int flag, int fmt,
    struct lwp *l)
{
	if (machclk_freq == 0)
		init_machclk();

	if (machclk_freq == 0) {
		printf("hfsc: no CPU clock available!\n");
		return (ENXIO);
	}

	/* everything will be done when the queueing scheme is attached. */
	return 0;
}

int
hfscclose(dev_t dev, int flag, int fmt,
    struct lwp *l)
{
	struct hfsc_if *hif;

	while ((hif = hif_list) != NULL) {
		/* destroy all */
		if (ALTQ_IS_ENABLED(hif->hif_ifq))
			altq_disable(hif->hif_ifq);

		int error = altq_detach(hif->hif_ifq);
		switch (error) {
		case 0:
		case ENXIO:	/* already disabled */
			break;
		default:
			return error;
		}
		hfsc_detach(hif);
	}

	return 0;
}

int
hfscioctl(dev_t dev, ioctlcmd_t cmd, void *addr, int flag,
    struct lwp *l)
{
	struct hfsc_if *hif;
	struct hfsc_interface *ifacep;
	int	error = 0;

	/* check super-user privilege */
	switch (cmd) {
	case HFSC_GETSTATS:
		break;
	default:
#if (__FreeBSD_version > 400000)
		if ((error = suser(p)) != 0)
			return (error);
#else
		if ((error = kauth_authorize_network(l->l_cred,
		    KAUTH_NETWORK_ALTQ, KAUTH_REQ_NETWORK_ALTQ_HFSC, NULL,
		    NULL, NULL)) != 0)
			return (error);
#endif
		break;
	}

	switch (cmd) {

	case HFSC_IF_ATTACH:
		error = hfsccmd_if_attach((struct hfsc_attach *)addr);
		break;

	case HFSC_IF_DETACH:
		error = hfsccmd_if_detach((struct hfsc_interface *)addr);
		break;

	case HFSC_ENABLE:
	case HFSC_DISABLE:
	case HFSC_CLEAR_HIERARCHY:
		ifacep = (struct hfsc_interface *)addr;
		if ((hif = altq_lookup(ifacep->hfsc_ifname,
				       ALTQT_HFSC)) == NULL) {
			error = EBADF;
			break;
		}

		switch (cmd) {

		case HFSC_ENABLE:
			if (hif->hif_defaultclass == NULL) {
#ifdef ALTQ_DEBUG
				printf("hfsc: no default class\n");
#endif
				error = EINVAL;
				break;
			}
			error = altq_enable(hif->hif_ifq);
			break;

		case HFSC_DISABLE:
			error = altq_disable(hif->hif_ifq);
			break;

		case HFSC_CLEAR_HIERARCHY:
			hfsc_clear_interface(hif);
			break;
		}
		break;

	case HFSC_ADD_CLASS:
		error = hfsccmd_add_class((struct hfsc_add_class *)addr);
		break;

	case HFSC_DEL_CLASS:
		error = hfsccmd_delete_class((struct hfsc_delete_class *)addr);
		break;

	case HFSC_MOD_CLASS:
		error = hfsccmd_modify_class((struct hfsc_modify_class *)addr);
		break;

	case HFSC_ADD_FILTER:
		error = hfsccmd_add_filter((struct hfsc_add_filter *)addr);
		break;

	case HFSC_DEL_FILTER:
		error = hfsccmd_delete_filter((struct hfsc_delete_filter *)addr);
		break;

	case HFSC_GETSTATS:
		error = hfsccmd_class_stats((struct hfsc_class_stats *)addr);
		break;

	default:
		error = EINVAL;
		break;
	}
	return error;
}

static int
hfsccmd_if_attach(struct hfsc_attach *ap)
{
	struct hfsc_if *hif;
	struct ifnet *ifp;
	int error;

	if ((ifp = ifunit(ap->iface.hfsc_ifname)) == NULL)
		return (ENXIO);

	if ((hif = hfsc_attach(&ifp->if_snd, ap->bandwidth)) == NULL)
		return (ENOMEM);

	/*
	 * set HFSC to this ifnet structure.
	 */
	if ((error = altq_attach(&ifp->if_snd, ALTQT_HFSC, hif,
				 hfsc_enqueue, hfsc_dequeue, hfsc_request,
				 &hif->hif_classifier, acc_classify)) != 0)
		hfsc_detach(hif);

	return (error);
}

static int
hfsccmd_if_detach(struct hfsc_interface *ap)
{
	struct hfsc_if *hif;
	int error;

	if ((hif = altq_lookup(ap->hfsc_ifname, ALTQT_HFSC)) == NULL)
		return (EBADF);

	if (ALTQ_IS_ENABLED(hif->hif_ifq))
		altq_disable(hif->hif_ifq);

	if ((error = altq_detach(hif->hif_ifq)))
		return (error);

	hfsc_detach(hif);
	return 0;
}

static int
hfsccmd_add_class(struct hfsc_add_class *ap)
{
	struct hfsc_if *hif;
	struct hfsc_class *cl, *parent;
	int	i;

	if ((hif = altq_lookup(ap->iface.hfsc_ifname, ALTQT_HFSC)) == NULL)
		return (EBADF);

	if (ap->parent_handle == HFSC_NULLCLASS_HANDLE &&
	    hif->hif_rootclass == NULL)
		parent = NULL;
	else if ((parent = clh_to_clp(hif, ap->parent_handle)) == NULL)
		return (EINVAL);

	/* assign a class handle (use a free slot number for now) */
	for (i = 1; i < HFSC_MAX_CLASSES; i++)
		if (hif->hif_class_tbl[i] == NULL)
			break;
	if (i == HFSC_MAX_CLASSES)
		return (EBUSY);

	if ((cl = hfsc_class_create(hif, &ap->service_curve, NULL, NULL,
	    parent, ap->qlimit, ap->flags, i)) == NULL)
		return (ENOMEM);

	/* return a class handle to the user */
	ap->class_handle = i;

	return (0);
}

static int
hfsccmd_delete_class(struct hfsc_delete_class *ap)
{
	struct hfsc_if *hif;
	struct hfsc_class *cl;

	if ((hif = altq_lookup(ap->iface.hfsc_ifname, ALTQT_HFSC)) == NULL)
		return (EBADF);

	if ((cl = clh_to_clp(hif, ap->class_handle)) == NULL)
		return (EINVAL);

	return hfsc_class_destroy(cl);
}

static int
hfsccmd_modify_class(struct hfsc_modify_class *ap)
{
	struct hfsc_if *hif;
	struct hfsc_class *cl;
	struct service_curve *rsc = NULL;
	struct service_curve *fsc = NULL;
	struct service_curve *usc = NULL;

	if ((hif = altq_lookup(ap->iface.hfsc_ifname, ALTQT_HFSC)) == NULL)
		return (EBADF);

	if ((cl = clh_to_clp(hif, ap->class_handle)) == NULL)
		return (EINVAL);

	if (ap->sctype & HFSC_REALTIMESC)
		rsc = &ap->service_curve;
	if (ap->sctype & HFSC_LINKSHARINGSC)
		fsc = &ap->service_curve;
	if (ap->sctype & HFSC_UPPERLIMITSC)
		usc = &ap->service_curve;

	return hfsc_class_modify(cl, rsc, fsc, usc);
}

static int
hfsccmd_add_filter(struct hfsc_add_filter *ap)
{
	struct hfsc_if *hif;
	struct hfsc_class *cl;

	if ((hif = altq_lookup(ap->iface.hfsc_ifname, ALTQT_HFSC)) == NULL)
		return (EBADF);

	if ((cl = clh_to_clp(hif, ap->class_handle)) == NULL)
		return (EINVAL);

	if (is_a_parent_class(cl)) {
#ifdef ALTQ_DEBUG
		printf("hfsccmd_add_filter: not a leaf class!\n");
#endif
		return (EINVAL);
	}

	return acc_add_filter(&hif->hif_classifier, &ap->filter,
			      cl, &ap->filter_handle);
}

static int
hfsccmd_delete_filter(struct hfsc_delete_filter *ap)
{
	struct hfsc_if *hif;

	if ((hif = altq_lookup(ap->iface.hfsc_ifname, ALTQT_HFSC)) == NULL)
		return (EBADF);

	return acc_delete_filter(&hif->hif_classifier,
				 ap->filter_handle);
}

static int
hfsccmd_class_stats(struct hfsc_class_stats *ap)
{
	struct hfsc_if *hif;
	struct hfsc_class *cl;
	struct hfsc_classstats stats, *usp;
	int	n, nclasses, error;

	if ((hif = altq_lookup(ap->iface.hfsc_ifname, ALTQT_HFSC)) == NULL)
		return (EBADF);

	ap->cur_time = read_machclk();
	ap->machclk_freq = machclk_freq;
	ap->hif_classes = hif->hif_classes;
	ap->hif_packets = hif->hif_packets;

	/* skip the first N classes in the tree */
	nclasses = ap->nskip;
	for (cl = hif->hif_rootclass, n = 0; cl != NULL && n < nclasses;
	     cl = hfsc_nextclass(cl), n++)
		;
	if (n != nclasses)
		return (EINVAL);

	/* then, read the next N classes in the tree */
	nclasses = ap->nclasses;
	usp = ap->stats;
	for (n = 0; cl != NULL && n < nclasses; cl = hfsc_nextclass(cl), n++) {

		memset(&stats, 0, sizeof(stats));
		get_class_stats(&stats, cl);

		if ((error = copyout((void *)&stats, (void *)usp++,
				     sizeof(stats))) != 0)
			return (error);
	}

	ap->nclasses = n;

	return (0);
}

#ifdef KLD_MODULE

static struct altqsw hfsc_sw =
	{"hfsc", hfscopen, hfscclose, hfscioctl};

ALTQ_MODULE(altq_hfsc, ALTQT_HFSC, &hfsc_sw);
MODULE_DEPEND(altq_hfsc, altq_red, 1, 1, 1);
MODULE_DEPEND(altq_hfsc, altq_rio, 1, 1, 1);

#endif /* KLD_MODULE */
#endif /* ALTQ3_COMPAT */

#endif /* ALTQ_HFSC */