/*
 * Copyright (c) 2011-2012 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */

/*
 * traffic class queue
 */

#include <sys/cdefs.h>
#include <sys/param.h>
#include <sys/malloc.h>
#include <sys/mbuf.h>
#include <sys/systm.h>
#include <sys/errno.h>
#include <sys/kernel.h>
#include <sys/syslog.h>

#include <kern/zalloc.h>

#include <net/if.h>
#include <net/net_osdep.h>

#include <net/pktsched/pktsched_tcq.h>
#include <netinet/in.h>

/*
 * function prototypes
 */
static int tcq_enqueue_ifclassq(struct ifclassq *, struct mbuf *);
static struct mbuf *tcq_dequeue_tc_ifclassq(struct ifclassq *,
    mbuf_svc_class_t, cqdq_op_t);
static int tcq_request_ifclassq(struct ifclassq *, cqrq_t, void *);
static int tcq_clear_interface(struct tcq_if *);
static struct tcq_class *tcq_class_create(struct tcq_if *, int, u_int32_t,
    int, u_int32_t);
static int tcq_class_destroy(struct tcq_if *, struct tcq_class *);
static int tcq_destroy_locked(struct tcq_if *);
static inline int tcq_addq(struct tcq_class *, struct mbuf *,
    struct pf_mtag *);
static inline struct mbuf *tcq_getq(struct tcq_class *);
static inline struct mbuf *tcq_pollq(struct tcq_class *);
static void tcq_purgeq(struct tcq_if *, struct tcq_class *, u_int32_t,
    u_int32_t *, u_int32_t *);
static void tcq_purge_sc(struct tcq_if *, cqrq_purge_sc_t *);
static void tcq_updateq(struct tcq_if *, struct tcq_class *, cqev_t);
static int tcq_throttle(struct tcq_if *, cqrq_throttle_t *);
static int tcq_resumeq(struct tcq_if *, struct tcq_class *);
static int tcq_suspendq(struct tcq_if *, struct tcq_class *);
static int tcq_stat_sc(struct tcq_if *, cqrq_stat_sc_t *);
static struct mbuf *tcq_dequeue_cl(struct tcq_if *, struct tcq_class *,
    mbuf_svc_class_t, cqdq_op_t);
static inline struct tcq_class *tcq_clh_to_clp(struct tcq_if *, u_int32_t);
static const char *tcq_style(struct tcq_if *);

#define	TCQ_ZONE_MAX	32		/* maximum elements in zone */
#define	TCQ_ZONE_NAME	"pktsched_tcq"	/* zone name */

static unsigned int tcq_size;		/* size of zone element */
static struct zone *tcq_zone;		/* zone for tcq */

#define	TCQ_CL_ZONE_MAX	32		/* maximum elements in zone */
#define	TCQ_CL_ZONE_NAME "pktsched_tcq_cl" /* zone name */

static unsigned int tcq_cl_size;	/* size of zone element */
static struct zone *tcq_cl_zone;	/* zone for tcq_class */

void
tcq_init(void)
{
	tcq_size = sizeof (struct tcq_if);
	tcq_zone = zinit(tcq_size, TCQ_ZONE_MAX * tcq_size,
	    0, TCQ_ZONE_NAME);
	if (tcq_zone == NULL) {
		panic("%s: failed allocating %s", __func__, TCQ_ZONE_NAME);
		/* NOTREACHED */
	}
	zone_change(tcq_zone, Z_EXPAND, TRUE);
	zone_change(tcq_zone, Z_CALLERACCT, TRUE);

	tcq_cl_size = sizeof (struct tcq_class);
	tcq_cl_zone = zinit(tcq_cl_size, TCQ_CL_ZONE_MAX * tcq_cl_size,
	    0, TCQ_CL_ZONE_NAME);
	if (tcq_cl_zone == NULL) {
		panic("%s: failed allocating %s", __func__, TCQ_CL_ZONE_NAME);
		/* NOTREACHED */
	}
	zone_change(tcq_cl_zone, Z_EXPAND, TRUE);
	zone_change(tcq_cl_zone, Z_CALLERACCT, TRUE);
}
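
/*
 * Note on the zone setup above (a sketch of the assumed zalloc
 * semantics, not authoritative): zinit() bounds each zone at
 * TCQ_ZONE_MAX (resp. TCQ_CL_ZONE_MAX) elements' worth of memory,
 * Z_EXPAND lets the zone grow on demand up to that bound, and
 * Z_CALLERACCT accounts allocations against the calling task rather
 * than the kernel.
 */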

struct tcq_if *
tcq_alloc(struct ifnet *ifp, int how, boolean_t altq)
{
	struct tcq_if	*tif;

	tif = (how == M_WAITOK) ? zalloc(tcq_zone) : zalloc_noblock(tcq_zone);
	if (tif == NULL)
		return (NULL);

	bzero(tif, tcq_size);
	tif->tif_maxpri = -1;
	tif->tif_ifq = &ifp->if_snd;
	if (altq)
		tif->tif_flags |= TCQIFF_ALTQ;

	if (pktsched_verbose) {
		log(LOG_DEBUG, "%s: %s scheduler allocated\n",
		    if_name(ifp), tcq_style(tif));
	}

	return (tif);
}

int
tcq_destroy(struct tcq_if *tif)
{
	struct ifclassq *ifq = tif->tif_ifq;
	int err;

	IFCQ_LOCK(ifq);
	err = tcq_destroy_locked(tif);
	IFCQ_UNLOCK(ifq);

	return (err);
}

static int
tcq_destroy_locked(struct tcq_if *tif)
{
	IFCQ_LOCK_ASSERT_HELD(tif->tif_ifq);

	(void) tcq_clear_interface(tif);

	if (pktsched_verbose) {
		log(LOG_DEBUG, "%s: %s scheduler destroyed\n",
		    if_name(TCQIF_IFP(tif)), tcq_style(tif));
	}

	zfree(tcq_zone, tif);

	return (0);
}

/*
 * bring the interface back to the initial state by discarding
 * all the filters and classes.
 */
static int
tcq_clear_interface(struct tcq_if *tif)
{
	struct tcq_class	*cl;
	int pri;

	IFCQ_LOCK_ASSERT_HELD(tif->tif_ifq);

	/* clear out the classes */
	for (pri = 0; pri <= tif->tif_maxpri; pri++)
		if ((cl = tif->tif_classes[pri]) != NULL)
			tcq_class_destroy(tif, cl);

	return (0);
}

/* discard all the queued packets on the interface */
void
tcq_purge(struct tcq_if *tif)
{
	struct tcq_class *cl;
	int pri;

	IFCQ_LOCK_ASSERT_HELD(tif->tif_ifq);

	for (pri = 0; pri <= tif->tif_maxpri; pri++) {
		if ((cl = tif->tif_classes[pri]) != NULL && !qempty(&cl->cl_q))
			tcq_purgeq(tif, cl, 0, NULL, NULL);
	}
#if !PF_ALTQ
	/*
	 * This assertion is safe to be made only when PF_ALTQ is not
	 * configured; otherwise, IFCQ_LEN represents the sum of the
	 * packets managed by ifcq_disc and altq_disc instances, which
	 * is possible when transitioning between the two.
	 */
	VERIFY(IFCQ_LEN(tif->tif_ifq) == 0);
#endif /* !PF_ALTQ */
}

static void
tcq_purge_sc(struct tcq_if *tif, cqrq_purge_sc_t *pr)
{
	struct ifclassq *ifq = tif->tif_ifq;
	u_int32_t i;

	IFCQ_LOCK_ASSERT_HELD(ifq);

	VERIFY(pr->sc == MBUF_SC_UNSPEC || MBUF_VALID_SC(pr->sc));
	VERIFY(pr->flow != 0);

	if (pr->sc != MBUF_SC_UNSPEC) {
		i = MBUF_SCIDX(pr->sc);
		VERIFY(i < IFCQ_SC_MAX);

		tcq_purgeq(tif, ifq->ifcq_disc_slots[i].cl,
		    pr->flow, &pr->packets, &pr->bytes);
	} else {
		u_int32_t cnt, len;

		pr->packets = 0;
		pr->bytes = 0;

		for (i = 0; i < IFCQ_SC_MAX; i++) {
			tcq_purgeq(tif, ifq->ifcq_disc_slots[i].cl,
			    pr->flow, &cnt, &len);
			pr->packets += cnt;
			pr->bytes += len;
		}
	}
}

void
tcq_event(struct tcq_if *tif, cqev_t ev)
{
	struct tcq_class *cl;
	int pri;

	IFCQ_LOCK_ASSERT_HELD(tif->tif_ifq);

	for (pri = 0; pri <= tif->tif_maxpri; pri++)
		if ((cl = tif->tif_classes[pri]) != NULL)
			tcq_updateq(tif, cl, ev);
}

int
tcq_add_queue(struct tcq_if *tif, int priority, u_int32_t qlimit,
    int flags, u_int32_t qid, struct tcq_class **clp)
{
	struct tcq_class *cl;

	IFCQ_LOCK_ASSERT_HELD(tif->tif_ifq);

	/* check parameters */
	if (priority >= TCQ_MAXPRI)
		return (EINVAL);
	if (tif->tif_classes[priority] != NULL)
		return (EBUSY);
	if (tcq_clh_to_clp(tif, qid) != NULL)
		return (EBUSY);

	cl = tcq_class_create(tif, priority, qlimit, flags, qid);
	if (cl == NULL)
		return (ENOMEM);

	if (clp != NULL)
		*clp = cl;

	return (0);
}

static struct tcq_class *
tcq_class_create(struct tcq_if *tif, int pri, u_int32_t qlimit,
    int flags, u_int32_t qid)
{
	struct ifnet *ifp;
	struct ifclassq *ifq;
	struct tcq_class *cl;

	IFCQ_LOCK_ASSERT_HELD(tif->tif_ifq);

	/* Sanitize flags unless internally configured */
	if (tif->tif_flags & TCQIFF_ALTQ)
		flags &= TQCF_USERFLAGS;

#if !CLASSQ_RED
	if (flags & TQCF_RED) {
		log(LOG_ERR, "%s: %s RED not available!\n",
		    if_name(TCQIF_IFP(tif)), tcq_style(tif));
		return (NULL);
	}
#endif /* !CLASSQ_RED */

#if !CLASSQ_RIO
	if (flags & TQCF_RIO) {
		log(LOG_ERR, "%s: %s RIO not available!\n",
		    if_name(TCQIF_IFP(tif)), tcq_style(tif));
		return (NULL);
	}
#endif /* !CLASSQ_RIO */

#if !CLASSQ_BLUE
	if (flags & TQCF_BLUE) {
		log(LOG_ERR, "%s: %s BLUE not available!\n",
		    if_name(TCQIF_IFP(tif)), tcq_style(tif));
		return (NULL);
	}
#endif /* !CLASSQ_BLUE */

	/* These are mutually exclusive */
	if ((flags & (TQCF_RED|TQCF_RIO|TQCF_BLUE|TQCF_SFB)) &&
	    (flags & (TQCF_RED|TQCF_RIO|TQCF_BLUE|TQCF_SFB)) != TQCF_RED &&
	    (flags & (TQCF_RED|TQCF_RIO|TQCF_BLUE|TQCF_SFB)) != TQCF_RIO &&
	    (flags & (TQCF_RED|TQCF_RIO|TQCF_BLUE|TQCF_SFB)) != TQCF_BLUE &&
	    (flags & (TQCF_RED|TQCF_RIO|TQCF_BLUE|TQCF_SFB)) != TQCF_SFB) {
		log(LOG_ERR, "%s: %s more than one RED|RIO|BLUE|SFB\n",
		    if_name(TCQIF_IFP(tif)), tcq_style(tif));
		return (NULL);
	}
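
	/*
	 * Equivalently (illustrative restatement): let q be the subset of
	 * flags within (TQCF_RED|TQCF_RIO|TQCF_BLUE|TQCF_SFB); since each
	 * TQCF_* constant is a single bit, the test above accepts q only
	 * if it is empty or a power of two, i.e. (q & (q - 1)) == 0, so
	 * at most one queueing discipline may be configured per class.
	 */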

	ifq = tif->tif_ifq;
	ifp = TCQIF_IFP(tif);

	if ((cl = tif->tif_classes[pri]) != NULL) {
		/* modify the class instead of creating a new one */
		if (!qempty(&cl->cl_q))
			tcq_purgeq(tif, cl, 0, NULL, NULL);
#if CLASSQ_RIO
		if (q_is_rio(&cl->cl_q))
			rio_destroy(cl->cl_rio);
#endif /* CLASSQ_RIO */
#if CLASSQ_RED
		if (q_is_red(&cl->cl_q))
			red_destroy(cl->cl_red);
#endif /* CLASSQ_RED */
#if CLASSQ_BLUE
		if (q_is_blue(&cl->cl_q))
			blue_destroy(cl->cl_blue);
#endif /* CLASSQ_BLUE */
		if (q_is_sfb(&cl->cl_q) && cl->cl_sfb != NULL)
			sfb_destroy(cl->cl_sfb);
		cl->cl_qalg.ptr = NULL;
		qtype(&cl->cl_q) = Q_DROPTAIL;
		qstate(&cl->cl_q) = QS_RUNNING;
	} else {
		cl = zalloc(tcq_cl_zone);
		if (cl == NULL)
			return (NULL);

		bzero(cl, tcq_cl_size);
	}

	tif->tif_classes[pri] = cl;
	if (flags & TQCF_DEFAULTCLASS)
		tif->tif_default = cl;
	if (qlimit == 0 || qlimit > IFCQ_MAXLEN(ifq)) {
		qlimit = IFCQ_MAXLEN(ifq);
		if (qlimit == 0)
			qlimit = DEFAULT_QLIMIT;  /* use default */
	}
	_qinit(&cl->cl_q, Q_DROPTAIL, qlimit);
	cl->cl_flags = flags;
	cl->cl_pri = pri;
	if (pri > tif->tif_maxpri)
		tif->tif_maxpri = pri;
	cl->cl_tif = tif;
	cl->cl_handle = qid;

	if (flags & (TQCF_RED|TQCF_RIO|TQCF_BLUE|TQCF_SFB)) {
#if CLASSQ_RED || CLASSQ_RIO
		u_int64_t ifbandwidth = ifnet_output_linkrate(ifp);
		int pkttime;
#endif /* CLASSQ_RED || CLASSQ_RIO */

		cl->cl_qflags = 0;
		if (flags & TQCF_ECN) {
			if (flags & TQCF_BLUE)
				cl->cl_qflags |= BLUEF_ECN;
			else if (flags & TQCF_SFB)
				cl->cl_qflags |= SFBF_ECN;
			else if (flags & TQCF_RED)
				cl->cl_qflags |= REDF_ECN;
			else if (flags & TQCF_RIO)
				cl->cl_qflags |= RIOF_ECN;
		}
		if (flags & TQCF_FLOWCTL) {
			if (flags & TQCF_SFB)
				cl->cl_qflags |= SFBF_FLOWCTL;
		}
		if (flags & TQCF_DELAYBASED) {
			if (flags & TQCF_SFB)
				cl->cl_qflags |= SFBF_DELAYBASED;
		}
		if (flags & TQCF_CLEARDSCP) {
			if (flags & TQCF_RIO)
				cl->cl_qflags |= RIOF_CLEARDSCP;
		}
#if CLASSQ_RED || CLASSQ_RIO
		/*
		 * XXX: RED & RIO should be watching link speed and MTU
		 *	events and recompute pkttime accordingly.
		 */
		if (ifbandwidth < 8)
			pkttime = 1000 * 1000 * 1000; /* 1 sec */
		else
			pkttime = (int64_t)ifp->if_mtu * 1000 * 1000 * 1000 /
			    (ifbandwidth / 8);

		/* Test for exclusivity {RED,RIO,BLUE,SFB} was done above */
#if CLASSQ_RED
		if (flags & TQCF_RED) {
			cl->cl_red = red_alloc(ifp, 0, 0,
			    qlimit(&cl->cl_q) * 10/100,
			    qlimit(&cl->cl_q) * 30/100,
			    cl->cl_qflags, pkttime);
			if (cl->cl_red != NULL)
				qtype(&cl->cl_q) = Q_RED;
		}
#endif /* CLASSQ_RED */
#if CLASSQ_RIO
		if (flags & TQCF_RIO) {
			cl->cl_rio =
			    rio_alloc(ifp, 0, NULL, cl->cl_qflags, pkttime);
			if (cl->cl_rio != NULL)
				qtype(&cl->cl_q) = Q_RIO;
		}
#endif /* CLASSQ_RIO */
#endif /* CLASSQ_RED || CLASSQ_RIO */
#if CLASSQ_BLUE
		if (flags & TQCF_BLUE) {
			cl->cl_blue = blue_alloc(ifp, 0, 0, cl->cl_qflags);
			if (cl->cl_blue != NULL)
				qtype(&cl->cl_q) = Q_BLUE;
		}
#endif /* CLASSQ_BLUE */
		if (flags & TQCF_SFB) {
			if (!(cl->cl_flags & TQCF_LAZY))
				cl->cl_sfb = sfb_alloc(ifp, cl->cl_handle,
				    qlimit(&cl->cl_q), cl->cl_qflags);
			if (cl->cl_sfb != NULL || (cl->cl_flags & TQCF_LAZY))
				qtype(&cl->cl_q) = Q_SFB;
		}
	}

	if (pktsched_verbose) {
		log(LOG_DEBUG, "%s: %s created qid=%d pri=%d qlimit=%d "
		    "flags=%b\n", if_name(ifp), tcq_style(tif),
		    cl->cl_handle, cl->cl_pri, qlimit, flags, TQCF_BITS);
	}

	return (cl);
}

int
tcq_remove_queue(struct tcq_if *tif, u_int32_t qid)
{
	struct tcq_class *cl;

	IFCQ_LOCK_ASSERT_HELD(tif->tif_ifq);

	if ((cl = tcq_clh_to_clp(tif, qid)) == NULL)
		return (EINVAL);

	return (tcq_class_destroy(tif, cl));
}

static int
tcq_class_destroy(struct tcq_if *tif, struct tcq_class *cl)
{
	struct ifclassq *ifq = tif->tif_ifq;
	int pri;

	IFCQ_LOCK_ASSERT_HELD(ifq);

	if (!qempty(&cl->cl_q))
		tcq_purgeq(tif, cl, 0, NULL, NULL);

	tif->tif_classes[cl->cl_pri] = NULL;
	if (tif->tif_maxpri == cl->cl_pri) {
		for (pri = cl->cl_pri; pri >= 0; pri--)
			if (tif->tif_classes[pri] != NULL) {
				tif->tif_maxpri = pri;
				break;
			}
		if (pri < 0)
			tif->tif_maxpri = -1;
	}

	if (tif->tif_default == cl)
		tif->tif_default = NULL;

	if (cl->cl_qalg.ptr != NULL) {
#if CLASSQ_RIO
		if (q_is_rio(&cl->cl_q))
			rio_destroy(cl->cl_rio);
#endif /* CLASSQ_RIO */
#if CLASSQ_RED
		if (q_is_red(&cl->cl_q))
			red_destroy(cl->cl_red);
#endif /* CLASSQ_RED */
#if CLASSQ_BLUE
		if (q_is_blue(&cl->cl_q))
			blue_destroy(cl->cl_blue);
#endif /* CLASSQ_BLUE */
		if (q_is_sfb(&cl->cl_q) && cl->cl_sfb != NULL)
			sfb_destroy(cl->cl_sfb);
		cl->cl_qalg.ptr = NULL;
		qtype(&cl->cl_q) = Q_DROPTAIL;
		qstate(&cl->cl_q) = QS_RUNNING;
	}

	if (pktsched_verbose) {
		log(LOG_DEBUG, "%s: %s destroyed qid=%d pri=%d\n",
		    if_name(TCQIF_IFP(tif)), tcq_style(tif),
		    cl->cl_handle, cl->cl_pri);
	}

	zfree(tcq_cl_zone, cl);
	return (0);
}

int
tcq_enqueue(struct tcq_if *tif, struct tcq_class *cl, struct mbuf *m,
    struct pf_mtag *t)
{
	struct ifclassq *ifq = tif->tif_ifq;
	int len, ret;

	IFCQ_LOCK_ASSERT_HELD(ifq);
	VERIFY(cl == NULL || cl->cl_tif == tif);

	if (cl == NULL) {
#if PF_ALTQ
		cl = tcq_clh_to_clp(tif, t->pftag_qid);
#else /* !PF_ALTQ */
		cl = tcq_clh_to_clp(tif, 0);
#endif /* !PF_ALTQ */
		if (cl == NULL) {
			cl = tif->tif_default;
			if (cl == NULL) {
				IFCQ_CONVERT_LOCK(ifq);
				m_freem(m);
				return (ENOBUFS);
			}
		}
	}

	len = m_pktlen(m);

	ret = tcq_addq(cl, m, t);
	if (ret != 0) {
		if (ret == CLASSQEQ_SUCCESS_FC) {
			/* packet enqueued, return advisory feedback */
			ret = EQFULL;
		} else {
			VERIFY(ret == CLASSQEQ_DROPPED ||
			    ret == CLASSQEQ_DROPPED_FC ||
			    ret == CLASSQEQ_DROPPED_SP);
			/* packet has been freed in tcq_addq */
			PKTCNTR_ADD(&cl->cl_dropcnt, 1, len);
			IFCQ_DROP_ADD(ifq, 1, len);
			switch (ret) {
			case CLASSQEQ_DROPPED:
				return (ENOBUFS);
			case CLASSQEQ_DROPPED_FC:
				return (EQFULL);
			case CLASSQEQ_DROPPED_SP:
				return (EQSUSPENDED);
			}
			/* NOTREACHED */
		}
	}
	IFCQ_INC_LEN(ifq);

	/* successfully queued. */
	return (ret);
}
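
/*
 * Illustrative sketch (hypothetical caller, not part of this file):
 * how the tcq_enqueue() return values might be interpreted.  Note
 * that EQFULL is advisory and can mean either "queued, please apply
 * flow control" or "dropped due to flow control"; in all cases the
 * mbuf has been consumed (queued or freed) by the time this returns.
 */
#if 0
static int
tcq_enqueue_example(struct tcq_if *tif, struct mbuf *m)
{
	int err = tcq_enqueue(tif, NULL, m, m_pftag(m));

	switch (err) {
	case 0:			/* queued */
	case EQFULL:		/* queued or dropped; flow-control feedback */
	case EQSUSPENDED:	/* dropped; class is suspended (throttled) */
	case ENOBUFS:		/* dropped; queue full or no default class */
		break;
	}
	return (err);
}
#endif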

/*
 * note: CLASSQDQ_POLL returns the next packet without removing the packet
 *	from the queue.  CLASSQDQ_REMOVE is a normal dequeue operation.
 *	CLASSQDQ_REMOVE must return the same packet if called immediately
 *	after CLASSQDQ_POLL.
 */
struct mbuf *
tcq_dequeue_tc(struct tcq_if *tif, mbuf_svc_class_t sc, cqdq_op_t op)
{
	return (tcq_dequeue_cl(tif, NULL, sc, op));
}
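
/*
 * Illustrative sketch (hypothetical consumer, not part of this file),
 * relying on the CLASSQDQ_POLL/CLASSQDQ_REMOVE contract above: a poll
 * followed immediately by a remove must yield the same packet.  The
 * ifclassq lock is assumed to be held across both calls.
 */
#if 0
static struct mbuf *
tcq_peek_then_pull(struct tcq_if *tif)
{
	struct mbuf *peeked, *pulled;

	/* non-destructive look at the head of the voice class */
	peeked = tcq_dequeue_tc(tif, MBUF_SC_VO, CLASSQDQ_POLL);
	if (peeked == NULL)
		return (NULL);

	/* destructive dequeue; must be the very same mbuf */
	pulled = tcq_dequeue_tc(tif, MBUF_SC_VO, CLASSQDQ_REMOVE);
	VERIFY(pulled == peeked);

	return (pulled);
}
#endif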

static struct mbuf *
tcq_dequeue_cl(struct tcq_if *tif, struct tcq_class *cl,
    mbuf_svc_class_t sc, cqdq_op_t op)
{
	struct ifclassq *ifq = tif->tif_ifq;
	struct mbuf *m;

	IFCQ_LOCK_ASSERT_HELD(ifq);

	if (cl == NULL) {
		cl = tcq_clh_to_clp(tif, MBUF_SCIDX(sc));
		if (cl == NULL)
			return (NULL);
	}

	if (qempty(&cl->cl_q))
		return (NULL);

	VERIFY(!IFCQ_IS_EMPTY(ifq));

	if (op == CLASSQDQ_POLL)
		return (tcq_pollq(cl));

	m = tcq_getq(cl);
	if (m != NULL) {
		IFCQ_DEC_LEN(ifq);
		if (qempty(&cl->cl_q))
			cl->cl_period++;
		PKTCNTR_ADD(&cl->cl_xmitcnt, 1, m_pktlen(m));
		IFCQ_XMIT_ADD(ifq, 1, m_pktlen(m));
	}
	return (m);
}

static inline int
tcq_addq(struct tcq_class *cl, struct mbuf *m, struct pf_mtag *t)
{
	struct tcq_if *tif = cl->cl_tif;
	struct ifclassq *ifq = tif->tif_ifq;

	IFCQ_LOCK_ASSERT_HELD(ifq);

#if CLASSQ_RIO
	if (q_is_rio(&cl->cl_q))
		return (rio_addq(cl->cl_rio, &cl->cl_q, m, t));
	else
#endif /* CLASSQ_RIO */
#if CLASSQ_RED
	if (q_is_red(&cl->cl_q))
		return (red_addq(cl->cl_red, &cl->cl_q, m, t));
	else
#endif /* CLASSQ_RED */
#if CLASSQ_BLUE
	if (q_is_blue(&cl->cl_q))
		return (blue_addq(cl->cl_blue, &cl->cl_q, m, t));
	else
#endif /* CLASSQ_BLUE */
	if (q_is_sfb(&cl->cl_q)) {
		if (cl->cl_sfb == NULL) {
			struct ifnet *ifp = TCQIF_IFP(tif);

			VERIFY(cl->cl_flags & TQCF_LAZY);
			cl->cl_flags &= ~TQCF_LAZY;
			IFCQ_CONVERT_LOCK(ifq);

			cl->cl_sfb = sfb_alloc(ifp, cl->cl_handle,
			    qlimit(&cl->cl_q), cl->cl_qflags);
			if (cl->cl_sfb == NULL) {
				/* fall back to droptail */
				qtype(&cl->cl_q) = Q_DROPTAIL;
				cl->cl_flags &= ~TQCF_SFB;
				cl->cl_qflags &= ~(SFBF_ECN | SFBF_FLOWCTL);

				log(LOG_ERR, "%s: %s SFB lazy allocation "
				    "failed for qid=%d pri=%d, falling back "
				    "to DROPTAIL\n", if_name(ifp),
				    tcq_style(tif), cl->cl_handle,
				    cl->cl_pri);
			} else if (tif->tif_throttle != IFNET_THROTTLE_OFF) {
				/* if there's pending throttling, set it */
				cqrq_throttle_t tr = { 1, tif->tif_throttle };
				int err = tcq_throttle(tif, &tr);

				if (err == EALREADY)
					err = 0;
				if (err != 0) {
					tr.level = IFNET_THROTTLE_OFF;
					(void) tcq_throttle(tif, &tr);
				}
			}
		}
		if (cl->cl_sfb != NULL)
			return (sfb_addq(cl->cl_sfb, &cl->cl_q, m, t));
	} else if (qlen(&cl->cl_q) >= qlimit(&cl->cl_q)) {
		IFCQ_CONVERT_LOCK(ifq);
		m_freem(m);
		return (CLASSQEQ_DROPPED);
	}

#if PF_ECN
	if (cl->cl_flags & TQCF_CLEARDSCP)
		write_dsfield(m, t, 0);
#endif /* PF_ECN */

	_addq(&cl->cl_q, m);

	return (0);
}

static inline struct mbuf *
tcq_getq(struct tcq_class *cl)
{
	IFCQ_LOCK_ASSERT_HELD(cl->cl_tif->tif_ifq);

#if CLASSQ_RIO
	if (q_is_rio(&cl->cl_q))
		return (rio_getq(cl->cl_rio, &cl->cl_q));
	else
#endif /* CLASSQ_RIO */
#if CLASSQ_RED
	if (q_is_red(&cl->cl_q))
		return (red_getq(cl->cl_red, &cl->cl_q));
	else
#endif /* CLASSQ_RED */
#if CLASSQ_BLUE
	if (q_is_blue(&cl->cl_q))
		return (blue_getq(cl->cl_blue, &cl->cl_q));
	else
#endif /* CLASSQ_BLUE */
	if (q_is_sfb(&cl->cl_q) && cl->cl_sfb != NULL)
		return (sfb_getq(cl->cl_sfb, &cl->cl_q));

	return (_getq(&cl->cl_q));
}

static inline struct mbuf *
tcq_pollq(struct tcq_class *cl)
{
	IFCQ_LOCK_ASSERT_HELD(cl->cl_tif->tif_ifq);

	return (qhead(&cl->cl_q));
}

static void
tcq_purgeq(struct tcq_if *tif, struct tcq_class *cl, u_int32_t flow,
    u_int32_t *packets, u_int32_t *bytes)
{
	struct ifclassq *ifq = tif->tif_ifq;
	u_int32_t cnt = 0, len = 0, qlen;

	IFCQ_LOCK_ASSERT_HELD(ifq);

	if ((qlen = qlen(&cl->cl_q)) == 0)
		goto done;

	/* become regular mutex before freeing mbufs */
	IFCQ_CONVERT_LOCK(ifq);

#if CLASSQ_RIO
	if (q_is_rio(&cl->cl_q))
		rio_purgeq(cl->cl_rio, &cl->cl_q, flow, &cnt, &len);
	else
#endif /* CLASSQ_RIO */
#if CLASSQ_RED
	if (q_is_red(&cl->cl_q))
		red_purgeq(cl->cl_red, &cl->cl_q, flow, &cnt, &len);
	else
#endif /* CLASSQ_RED */
#if CLASSQ_BLUE
	if (q_is_blue(&cl->cl_q))
		blue_purgeq(cl->cl_blue, &cl->cl_q, flow, &cnt, &len);
	else
#endif /* CLASSQ_BLUE */
	if (q_is_sfb(&cl->cl_q) && cl->cl_sfb != NULL)
		sfb_purgeq(cl->cl_sfb, &cl->cl_q, flow, &cnt, &len);
	else
		_flushq_flow(&cl->cl_q, flow, &cnt, &len);

	if (cnt > 0) {
		VERIFY(qlen(&cl->cl_q) == (qlen - cnt));

		PKTCNTR_ADD(&cl->cl_dropcnt, cnt, len);
		IFCQ_DROP_ADD(ifq, cnt, len);

		VERIFY(((signed)IFCQ_LEN(ifq) - cnt) >= 0);
		IFCQ_LEN(ifq) -= cnt;

		if (pktsched_verbose) {
			log(LOG_DEBUG, "%s: %s purge qid=%d pri=%d "
			    "qlen=[%d,%d] cnt=%d len=%d flow=0x%x\n",
			    if_name(TCQIF_IFP(tif)), tcq_style(tif),
			    cl->cl_handle, cl->cl_pri, qlen, qlen(&cl->cl_q),
			    cnt, len, flow);
		}
	}
done:
	if (packets != NULL)
		*packets = cnt;
	if (bytes != NULL)
		*bytes = len;
}

static void
tcq_updateq(struct tcq_if *tif, struct tcq_class *cl, cqev_t ev)
{
	IFCQ_LOCK_ASSERT_HELD(tif->tif_ifq);

	if (pktsched_verbose) {
		log(LOG_DEBUG, "%s: %s update qid=%d pri=%d event=%s\n",
		    if_name(TCQIF_IFP(tif)), tcq_style(tif),
		    cl->cl_handle, cl->cl_pri, ifclassq_ev2str(ev));
	}

#if CLASSQ_RIO
	if (q_is_rio(&cl->cl_q))
		return (rio_updateq(cl->cl_rio, ev));
#endif /* CLASSQ_RIO */
#if CLASSQ_RED
	if (q_is_red(&cl->cl_q))
		return (red_updateq(cl->cl_red, ev));
#endif /* CLASSQ_RED */
#if CLASSQ_BLUE
	if (q_is_blue(&cl->cl_q))
		return (blue_updateq(cl->cl_blue, ev));
#endif /* CLASSQ_BLUE */
	if (q_is_sfb(&cl->cl_q) && cl->cl_sfb != NULL)
		return (sfb_updateq(cl->cl_sfb, ev));
}

int
tcq_get_class_stats(struct tcq_if *tif, u_int32_t qid,
    struct tcq_classstats *sp)
{
	struct tcq_class *cl;

	IFCQ_LOCK_ASSERT_HELD(tif->tif_ifq);

	if ((cl = tcq_clh_to_clp(tif, qid)) == NULL)
		return (EINVAL);

	sp->class_handle = cl->cl_handle;
	sp->priority = cl->cl_pri;
	sp->qlength = qlen(&cl->cl_q);
	sp->qlimit = qlimit(&cl->cl_q);
	sp->period = cl->cl_period;
	sp->xmitcnt = cl->cl_xmitcnt;
	sp->dropcnt = cl->cl_dropcnt;

	sp->qtype = qtype(&cl->cl_q);
	sp->qstate = qstate(&cl->cl_q);
#if CLASSQ_RED
	if (q_is_red(&cl->cl_q))
		red_getstats(cl->cl_red, &sp->red[0]);
#endif /* CLASSQ_RED */
#if CLASSQ_RIO
	if (q_is_rio(&cl->cl_q))
		rio_getstats(cl->cl_rio, &sp->red[0]);
#endif /* CLASSQ_RIO */
#if CLASSQ_BLUE
	if (q_is_blue(&cl->cl_q))
		blue_getstats(cl->cl_blue, &sp->blue);
#endif /* CLASSQ_BLUE */
	if (q_is_sfb(&cl->cl_q) && cl->cl_sfb != NULL)
		sfb_getstats(cl->cl_sfb, &sp->sfb);

	return (0);
}

static int
tcq_stat_sc(struct tcq_if *tif, cqrq_stat_sc_t *sr)
{
	struct ifclassq *ifq = tif->tif_ifq;
	struct tcq_class *cl;
	u_int32_t i;

	IFCQ_LOCK_ASSERT_HELD(ifq);

	VERIFY(sr->sc == MBUF_SC_UNSPEC || MBUF_VALID_SC(sr->sc));

	i = MBUF_SCIDX(sr->sc);
	VERIFY(i < IFCQ_SC_MAX);

	cl = ifq->ifcq_disc_slots[i].cl;
	sr->packets = qlen(&cl->cl_q);
	sr->bytes = qsize(&cl->cl_q);

	return (0);
}

/* convert a class handle to the corresponding class pointer */
static inline struct tcq_class *
tcq_clh_to_clp(struct tcq_if *tif, u_int32_t chandle)
{
	struct tcq_class *cl;
	int idx;

	IFCQ_LOCK_ASSERT_HELD(tif->tif_ifq);

	for (idx = tif->tif_maxpri; idx >= 0; idx--)
		if ((cl = tif->tif_classes[idx]) != NULL &&
		    cl->cl_handle == chandle)
			return (cl);

	return (NULL);
}

static const char *
tcq_style(struct tcq_if *tif)
{
	return ((tif->tif_flags & TCQIFF_ALTQ) ? "ALTQ_TCQ" : "TCQ");
}

/*
 * tcq_enqueue_ifclassq is an enqueue function to be registered to
 * (*ifcq_enqueue) in struct ifclassq.
 */
static int
tcq_enqueue_ifclassq(struct ifclassq *ifq, struct mbuf *m)
{
	u_int32_t i;

	IFCQ_LOCK_ASSERT_HELD(ifq);

	if (!(m->m_flags & M_PKTHDR)) {
		/* should not happen */
		log(LOG_ERR, "%s: packet does not have pkthdr\n",
		    if_name(ifq->ifcq_ifp));
		IFCQ_CONVERT_LOCK(ifq);
		m_freem(m);
		return (ENOBUFS);
	}

	i = MBUF_SCIDX(mbuf_get_service_class(m));
	VERIFY((u_int32_t)i < IFCQ_SC_MAX);

	return (tcq_enqueue(ifq->ifcq_disc,
	    ifq->ifcq_disc_slots[i].cl, m, m_pftag(m)));
}

/*
 * tcq_dequeue_tc_ifclassq is a dequeue function to be registered to
 * (*ifcq_dequeue) in struct ifclassq.
 *
 * note: CLASSQDQ_POLL returns the next packet without removing the packet
 *	from the queue.  CLASSQDQ_REMOVE is a normal dequeue operation.
 *	CLASSQDQ_REMOVE must return the same packet if called immediately
 *	after CLASSQDQ_POLL.
 */
static struct mbuf *
tcq_dequeue_tc_ifclassq(struct ifclassq *ifq, mbuf_svc_class_t sc,
    cqdq_op_t op)
{
	u_int32_t i = MBUF_SCIDX(sc);

	VERIFY((u_int32_t)i < IFCQ_SC_MAX);

	return (tcq_dequeue_cl(ifq->ifcq_disc,
	    ifq->ifcq_disc_slots[i].cl, sc, op));
}

static int
tcq_request_ifclassq(struct ifclassq *ifq, cqrq_t req, void *arg)
{
	struct tcq_if	*tif = (struct tcq_if *)ifq->ifcq_disc;
	int err = 0;

	IFCQ_LOCK_ASSERT_HELD(ifq);

	switch (req) {
	case CLASSQRQ_PURGE:
		tcq_purge(tif);
		break;

	case CLASSQRQ_PURGE_SC:
		tcq_purge_sc(tif, (cqrq_purge_sc_t *)arg);
		break;

	case CLASSQRQ_EVENT:
		tcq_event(tif, (cqev_t)arg);
		break;

	case CLASSQRQ_THROTTLE:
		err = tcq_throttle(tif, (cqrq_throttle_t *)arg);
		break;

	case CLASSQRQ_STAT_SC:
		err = tcq_stat_sc(tif, (cqrq_stat_sc_t *)arg);
		break;
	}
	return (err);
}

int
tcq_setup_ifclassq(struct ifclassq *ifq, u_int32_t flags)
{
	struct ifnet *ifp = ifq->ifcq_ifp;
	struct tcq_class *cl0, *cl1, *cl2, *cl3;
	struct tcq_if *tif;
	u_int32_t maxlen = 0, qflags = 0;
	int err = 0;

	IFCQ_LOCK_ASSERT_HELD(ifq);
	VERIFY(ifq->ifcq_disc == NULL);
	VERIFY(ifq->ifcq_type == PKTSCHEDT_NONE);

	if (flags & PKTSCHEDF_QALG_RED)
		qflags |= TQCF_RED;
	if (flags & PKTSCHEDF_QALG_RIO)
		qflags |= TQCF_RIO;
	if (flags & PKTSCHEDF_QALG_BLUE)
		qflags |= TQCF_BLUE;
	if (flags & PKTSCHEDF_QALG_SFB)
		qflags |= TQCF_SFB;
	if (flags & PKTSCHEDF_QALG_ECN)
		qflags |= TQCF_ECN;
	if (flags & PKTSCHEDF_QALG_FLOWCTL)
		qflags |= TQCF_FLOWCTL;
	if (flags & PKTSCHEDF_QALG_DELAYBASED)
		qflags |= TQCF_DELAYBASED;

	tif = tcq_alloc(ifp, M_WAITOK, FALSE);
	if (tif == NULL)
		return (ENOMEM);

	if ((maxlen = IFCQ_MAXLEN(ifq)) == 0)
		maxlen = if_sndq_maxlen;

	if ((err = tcq_add_queue(tif, 0, maxlen,
	    qflags | TQCF_LAZY, SCIDX_BK, &cl0)) != 0)
		goto cleanup;

	if ((err = tcq_add_queue(tif, 1, maxlen,
	    qflags | TQCF_DEFAULTCLASS, SCIDX_BE, &cl1)) != 0)
		goto cleanup;

	if ((err = tcq_add_queue(tif, 2, maxlen,
	    qflags | TQCF_LAZY, SCIDX_VI, &cl2)) != 0)
		goto cleanup;

	if ((err = tcq_add_queue(tif, 3, maxlen,
	    qflags, SCIDX_VO, &cl3)) != 0)
		goto cleanup;

	err = ifclassq_attach(ifq, PKTSCHEDT_TCQ, tif,
	    tcq_enqueue_ifclassq, NULL, tcq_dequeue_tc_ifclassq,
	    tcq_request_ifclassq);

	/* cache these for faster lookup */
	if (err == 0) {
		/* Map {BK_SYS,BK} to TC_BK */
		ifq->ifcq_disc_slots[SCIDX_BK_SYS].qid = SCIDX_BK;
		ifq->ifcq_disc_slots[SCIDX_BK_SYS].cl = cl0;

		ifq->ifcq_disc_slots[SCIDX_BK].qid = SCIDX_BK;
		ifq->ifcq_disc_slots[SCIDX_BK].cl = cl0;

		/* Map {BE,RD,OAM} to TC_BE */
		ifq->ifcq_disc_slots[SCIDX_BE].qid = SCIDX_BE;
		ifq->ifcq_disc_slots[SCIDX_BE].cl = cl1;

		ifq->ifcq_disc_slots[SCIDX_RD].qid = SCIDX_BE;
		ifq->ifcq_disc_slots[SCIDX_RD].cl = cl1;

		ifq->ifcq_disc_slots[SCIDX_OAM].qid = SCIDX_BE;
		ifq->ifcq_disc_slots[SCIDX_OAM].cl = cl1;

		/* Map {AV,RV,VI} to TC_VI */
		ifq->ifcq_disc_slots[SCIDX_AV].qid = SCIDX_VI;
		ifq->ifcq_disc_slots[SCIDX_AV].cl = cl2;

		ifq->ifcq_disc_slots[SCIDX_RV].qid = SCIDX_VI;
		ifq->ifcq_disc_slots[SCIDX_RV].cl = cl2;

		ifq->ifcq_disc_slots[SCIDX_VI].qid = SCIDX_VI;
		ifq->ifcq_disc_slots[SCIDX_VI].cl = cl2;

		/* Map {VO,CTL} to TC_VO */
		ifq->ifcq_disc_slots[SCIDX_VO].qid = SCIDX_VO;
		ifq->ifcq_disc_slots[SCIDX_VO].cl = cl3;

		ifq->ifcq_disc_slots[SCIDX_CTL].qid = SCIDX_VO;
		ifq->ifcq_disc_slots[SCIDX_CTL].cl = cl3;
	}

cleanup:
	if (err != 0)
		(void) tcq_destroy_locked(tif);

	return (err);
}
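
/*
 * For reference, the 10-to-4 service class mapping installed above
 * (restating the slot assignments, not new policy):
 *
 *	MBUF_SC_BK_SYS, MBUF_SC_BK          -> pri 0, qid SCIDX_BK (cl0)
 *	MBUF_SC_BE, MBUF_SC_RD, MBUF_SC_OAM -> pri 1, qid SCIDX_BE (cl1, default)
 *	MBUF_SC_AV, MBUF_SC_RV, MBUF_SC_VI  -> pri 2, qid SCIDX_VI (cl2)
 *	MBUF_SC_VO, MBUF_SC_CTL             -> pri 3, qid SCIDX_VO (cl3)
 */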

int
tcq_teardown_ifclassq(struct ifclassq *ifq)
{
	struct tcq_if *tif = ifq->ifcq_disc;
	int i;

	IFCQ_LOCK_ASSERT_HELD(ifq);
	VERIFY(tif != NULL && ifq->ifcq_type == PKTSCHEDT_TCQ);

	(void) tcq_destroy_locked(tif);

	ifq->ifcq_disc = NULL;
	for (i = 0; i < IFCQ_SC_MAX; i++) {
		ifq->ifcq_disc_slots[i].qid = 0;
		ifq->ifcq_disc_slots[i].cl = NULL;
	}

	return (ifclassq_detach(ifq));
}

int
tcq_getqstats_ifclassq(struct ifclassq *ifq, u_int32_t slot,
    struct if_ifclassq_stats *ifqs)
{
	struct tcq_if *tif = ifq->ifcq_disc;

	IFCQ_LOCK_ASSERT_HELD(ifq);
	VERIFY(ifq->ifcq_type == PKTSCHEDT_TCQ);

	if (slot >= IFCQ_SC_MAX)
		return (EINVAL);

	return (tcq_get_class_stats(tif, ifq->ifcq_disc_slots[slot].qid,
	    &ifqs->ifqs_tcq_stats));
}

static int
tcq_throttle(struct tcq_if *tif, cqrq_throttle_t *tr)
{
	struct ifclassq *ifq = tif->tif_ifq;
	struct tcq_class *cl;
	int err = 0;

	IFCQ_LOCK_ASSERT_HELD(ifq);
	VERIFY(!(tif->tif_flags & TCQIFF_ALTQ));

	if (!tr->set) {
		tr->level = tif->tif_throttle;
		return (0);
	}

	if (tr->level == tif->tif_throttle)
		return (EALREADY);

	/* Current throttling levels only involve BK_SYS class */
	cl = ifq->ifcq_disc_slots[SCIDX_BK_SYS].cl;

	switch (tr->level) {
	case IFNET_THROTTLE_OFF:
		err = tcq_resumeq(tif, cl);
		break;

	case IFNET_THROTTLE_OPPORTUNISTIC:
		err = tcq_suspendq(tif, cl);
		break;

	default:
		VERIFY(0);
		/* NOTREACHED */
	}

	if (err == 0 || err == ENXIO) {
		if (pktsched_verbose) {
			log(LOG_DEBUG, "%s: %s throttling %slevel set %d->%d\n",
			    if_name(TCQIF_IFP(tif)), tcq_style(tif),
			    (err == 0) ? "" : "lazy ", tif->tif_throttle,
			    tr->level);
		}
		tif->tif_throttle = tr->level;
		if (err != 0)
			err = 0;
		else
			tcq_purgeq(tif, cl, 0, NULL, NULL);
	} else {
		log(LOG_ERR, "%s: %s unable to set throttling level "
		    "%d->%d [error=%d]\n", if_name(TCQIF_IFP(tif)),
		    tcq_style(tif), tif->tif_throttle, tr->level, err);
	}

	return (err);
}
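
/*
 * Note on lazy throttling: tcq_suspendq() below returns ENXIO when the
 * class is configured for SFB but the queue has not been allocated yet
 * (TQCF_LAZY).  tcq_throttle() above still records the level in
 * tif_throttle, and tcq_addq() re-applies it once sfb_alloc()
 * eventually succeeds.
 */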

static int
tcq_resumeq(struct tcq_if *tif, struct tcq_class *cl)
{
	struct ifclassq *ifq = tif->tif_ifq;
	int err = 0;

	IFCQ_LOCK_ASSERT_HELD(ifq);

#if CLASSQ_RIO
	if (q_is_rio(&cl->cl_q))
		err = rio_suspendq(cl->cl_rio, &cl->cl_q, FALSE);
	else
#endif /* CLASSQ_RIO */
#if CLASSQ_RED
	if (q_is_red(&cl->cl_q))
		err = red_suspendq(cl->cl_red, &cl->cl_q, FALSE);
	else
#endif /* CLASSQ_RED */
#if CLASSQ_BLUE
	if (q_is_blue(&cl->cl_q))
		err = blue_suspendq(cl->cl_blue, &cl->cl_q, FALSE);
	else
#endif /* CLASSQ_BLUE */
	if (q_is_sfb(&cl->cl_q) && cl->cl_sfb != NULL)
		err = sfb_suspendq(cl->cl_sfb, &cl->cl_q, FALSE);

	if (err == 0)
		qstate(&cl->cl_q) = QS_RUNNING;

	return (err);
}

static int
tcq_suspendq(struct tcq_if *tif, struct tcq_class *cl)
{
	struct ifclassq *ifq = tif->tif_ifq;
	int err = 0;

	IFCQ_LOCK_ASSERT_HELD(ifq);

#if CLASSQ_RIO
	if (q_is_rio(&cl->cl_q))
		err = rio_suspendq(cl->cl_rio, &cl->cl_q, TRUE);
	else
#endif /* CLASSQ_RIO */
#if CLASSQ_RED
	if (q_is_red(&cl->cl_q))
		err = red_suspendq(cl->cl_red, &cl->cl_q, TRUE);
	else
#endif /* CLASSQ_RED */
#if CLASSQ_BLUE
	if (q_is_blue(&cl->cl_q))
		err = blue_suspendq(cl->cl_blue, &cl->cl_q, TRUE);
	else
#endif /* CLASSQ_BLUE */
	if (q_is_sfb(&cl->cl_q)) {
		if (cl->cl_sfb != NULL) {
			err = sfb_suspendq(cl->cl_sfb, &cl->cl_q, TRUE);
		} else {
			VERIFY(cl->cl_flags & TQCF_LAZY);
			err = ENXIO;	/* delayed throttling */
		}
	}

	if (err == 0 || err == ENXIO)
		qstate(&cl->cl_q) = QS_SUSPENDED;

	return (err);
}