/*
 * Copyright (c) 2011-2013 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */

#include <sys/cdefs.h>
#include <sys/param.h>
#include <sys/mbuf.h>
#include <sys/errno.h>
#include <sys/random.h>
#include <sys/kernel_types.h>
#include <sys/sysctl.h>

#include <kern/zalloc.h>

#include <net/if.h>
#include <net/net_osdep.h>
#include <net/classq/classq.h>
#if CLASSQ_RED
#include <net/classq/classq_red.h>
#endif /* CLASSQ_RED */
#if CLASSQ_RIO
#include <net/classq/classq_rio.h>
#endif /* CLASSQ_RIO */
#if CLASSQ_BLUE
#include <net/classq/classq_blue.h>
#endif /* CLASSQ_BLUE */
#include <net/classq/classq_sfb.h>
#include <net/pktsched/pktsched.h>

#include <libkern/libkern.h>

#if PF_ALTQ
#include <net/altq/altq.h>
#endif /* PF_ALTQ */

static errno_t ifclassq_dequeue_common(struct ifclassq *, mbuf_svc_class_t,
    u_int32_t, struct mbuf **, struct mbuf **, u_int32_t *, u_int32_t *,
    boolean_t);
static struct mbuf *ifclassq_poll_common(struct ifclassq *,
    mbuf_svc_class_t, boolean_t);
static struct mbuf *ifclassq_tbr_dequeue_common(struct ifclassq *, int,
    mbuf_svc_class_t, boolean_t);

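/*
 * One-time framework initialization: sanity-check the service class
 * constants and initialize each of the compiled-in AQM disciplines.
 */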
void
classq_init(void)
{
	_CASSERT(MBUF_TC_BE == 0);
	_CASSERT(MBUF_SC_BE == 0);
	_CASSERT(IFCQ_SC_MAX == MBUF_SC_MAX_CLASSES);

#if CLASSQ_RED
	red_init();
#endif /* CLASSQ_RED */
#if CLASSQ_RIO
	rio_init();
#endif /* CLASSQ_RIO */
#if CLASSQ_BLUE
	blue_init();
#endif /* CLASSQ_BLUE */
	sfb_init();
}

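/*
 * Set up the interface send queue (if_snd).  For interfaces that use
 * the new output model (IFEF_TXSTART), this also attaches a packet
 * scheduler and marks the queue ready and enabled.
 */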
int
ifclassq_setup(struct ifnet *ifp, u_int32_t sflags, boolean_t reuse)
{
#pragma unused(reuse)
	struct ifclassq *ifq = &ifp->if_snd;
	int err = 0;

	IFCQ_LOCK(ifq);
	VERIFY(IFCQ_IS_EMPTY(ifq));
	ifq->ifcq_ifp = ifp;
	IFCQ_LEN(ifq) = 0;
	bzero(&ifq->ifcq_xmitcnt, sizeof (ifq->ifcq_xmitcnt));
	bzero(&ifq->ifcq_dropcnt, sizeof (ifq->ifcq_dropcnt));

	VERIFY(!IFCQ_TBR_IS_ENABLED(ifq));
	VERIFY(ifq->ifcq_type == PKTSCHEDT_NONE);
	VERIFY(ifq->ifcq_flags == 0);
	VERIFY(ifq->ifcq_sflags == 0);
	VERIFY(ifq->ifcq_disc == NULL);
	VERIFY(ifq->ifcq_enqueue == NULL);
	VERIFY(ifq->ifcq_dequeue == NULL);
	VERIFY(ifq->ifcq_dequeue_sc == NULL);
	VERIFY(ifq->ifcq_request == NULL);

	if (ifp->if_eflags & IFEF_TXSTART) {
		u_int32_t maxlen = 0;

		if ((maxlen = IFCQ_MAXLEN(ifq)) == 0)
			maxlen = if_sndq_maxlen;
		IFCQ_SET_MAXLEN(ifq, maxlen);

		if (IFCQ_MAXLEN(ifq) != if_sndq_maxlen &&
		    IFCQ_TARGET_QDELAY(ifq) == 0) {
			/*
			 * Choose static queueing because the interface
			 * has a maximum queue size set.
			 */
			sflags &= ~PKTSCHEDF_QALG_DELAYBASED;
		}
		ifq->ifcq_sflags = sflags;
		err = ifclassq_pktsched_setup(ifq);
		if (err == 0)
			ifq->ifcq_flags = (IFCQF_READY | IFCQF_ENABLED);
	}

#if PF_ALTQ
	ifq->ifcq_drain = 0;
	IFCQ_ALTQ(ifq)->altq_ifcq = ifq;
	VERIFY(IFCQ_ALTQ(ifq)->altq_type == ALTQT_NONE);
	VERIFY(IFCQ_ALTQ(ifq)->altq_flags == 0);
	VERIFY(IFCQ_ALTQ(ifq)->altq_disc == NULL);
	VERIFY(IFCQ_ALTQ(ifq)->altq_enqueue == NULL);
	VERIFY(IFCQ_ALTQ(ifq)->altq_dequeue == NULL);
	VERIFY(IFCQ_ALTQ(ifq)->altq_dequeue_sc == NULL);
	VERIFY(IFCQ_ALTQ(ifq)->altq_request == NULL);

	if ((ifp->if_eflags & IFEF_TXSTART) &&
	    ifp->if_output_sched_model != IFNET_SCHED_MODEL_DRIVER_MANAGED)
		ALTQ_SET_READY(IFCQ_ALTQ(ifq));
	else
		ALTQ_CLEAR_READY(IFCQ_ALTQ(ifq));
#endif /* PF_ALTQ */
	IFCQ_UNLOCK(ifq);

	return (err);
}

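/*
 * Tear down the interface send queue: disable any token bucket
 * regulator (and, with PF_ALTQ, any ALTQ state), detach the scheduler
 * and reset all queue state.  The queue must already be empty.
 */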
void
ifclassq_teardown(struct ifnet *ifp)
{
	struct ifclassq *ifq = &ifp->if_snd;

	IFCQ_LOCK(ifq);
#if PF_ALTQ
	if (ALTQ_IS_READY(IFCQ_ALTQ(ifq))) {
		if (ALTQ_IS_ENABLED(IFCQ_ALTQ(ifq)))
			altq_disable(IFCQ_ALTQ(ifq));
		if (ALTQ_IS_ATTACHED(IFCQ_ALTQ(ifq)))
			altq_detach(IFCQ_ALTQ(ifq));
		IFCQ_ALTQ(ifq)->altq_flags = 0;
	}
	ifq->ifcq_drain = 0;
	IFCQ_ALTQ(ifq)->altq_ifcq = NULL;
	VERIFY(IFCQ_ALTQ(ifq)->altq_type == ALTQT_NONE);
	VERIFY(IFCQ_ALTQ(ifq)->altq_flags == 0);
	VERIFY(IFCQ_ALTQ(ifq)->altq_disc == NULL);
	VERIFY(IFCQ_ALTQ(ifq)->altq_enqueue == NULL);
	VERIFY(IFCQ_ALTQ(ifq)->altq_dequeue == NULL);
	VERIFY(IFCQ_ALTQ(ifq)->altq_dequeue_sc == NULL);
	VERIFY(IFCQ_ALTQ(ifq)->altq_request == NULL);
#endif /* PF_ALTQ */

	if (IFCQ_IS_READY(ifq)) {
		if (IFCQ_TBR_IS_ENABLED(ifq)) {
			struct tb_profile tb = { 0, 0, 0 };
			(void) ifclassq_tbr_set(ifq, &tb, FALSE);
		}
		(void) pktsched_teardown(ifq);
		ifq->ifcq_flags = 0;
	}
	ifq->ifcq_sflags = 0;

	VERIFY(IFCQ_IS_EMPTY(ifq));
	VERIFY(!IFCQ_TBR_IS_ENABLED(ifq));
	VERIFY(ifq->ifcq_type == PKTSCHEDT_NONE);
	VERIFY(ifq->ifcq_flags == 0);
	VERIFY(ifq->ifcq_sflags == 0);
	VERIFY(ifq->ifcq_disc == NULL);
	VERIFY(ifq->ifcq_enqueue == NULL);
	VERIFY(ifq->ifcq_dequeue == NULL);
	VERIFY(ifq->ifcq_dequeue_sc == NULL);
	VERIFY(ifq->ifcq_request == NULL);
	IFCQ_LEN(ifq) = 0;
	IFCQ_MAXLEN(ifq) = 0;
	bzero(&ifq->ifcq_xmitcnt, sizeof (ifq->ifcq_xmitcnt));
	bzero(&ifq->ifcq_dropcnt, sizeof (ifq->ifcq_dropcnt));

	IFCQ_UNLOCK(ifq);
}

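/*
 * Attach the packet scheduler that matches the interface's output
 * scheduling model: TCQ for driver-managed interfaces, QFQ for the
 * normal model.
 */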
int
ifclassq_pktsched_setup(struct ifclassq *ifq)
{
	struct ifnet *ifp = ifq->ifcq_ifp;
	int err = 0;

	IFCQ_LOCK_ASSERT_HELD(ifq);
	VERIFY(ifp->if_eflags & IFEF_TXSTART);

	switch (ifp->if_output_sched_model) {
	case IFNET_SCHED_MODEL_DRIVER_MANAGED:
		err = pktsched_setup(ifq, PKTSCHEDT_TCQ, ifq->ifcq_sflags);
		break;

	case IFNET_SCHED_MODEL_NORMAL:
		err = pktsched_setup(ifq, PKTSCHEDT_QFQ, ifq->ifcq_sflags);
		break;

	default:
		VERIFY(0);
		/* NOTREACHED */
	}

	return (err);
}

void
ifclassq_set_maxlen(struct ifclassq *ifq, u_int32_t maxqlen)
{
	IFCQ_LOCK(ifq);
	if (maxqlen == 0)
		maxqlen = if_sndq_maxlen;
	IFCQ_SET_MAXLEN(ifq, maxqlen);
	IFCQ_UNLOCK(ifq);
}

u_int32_t
ifclassq_get_maxlen(struct ifclassq *ifq)
{
	return (IFCQ_MAXLEN(ifq));
}

int
ifclassq_get_len(struct ifclassq *ifq, mbuf_svc_class_t sc, u_int32_t *packets,
    u_int32_t *bytes)
{
	int err = 0;

	IFCQ_LOCK(ifq);
	if (sc == MBUF_SC_UNSPEC) {
		VERIFY(packets != NULL);
		*packets = IFCQ_LEN(ifq);
	} else {
		VERIFY(MBUF_VALID_SC(sc));
		VERIFY(packets != NULL && bytes != NULL);
		IFCQ_LEN_SC(ifq, sc, packets, bytes, err);
	}
	IFCQ_UNLOCK(ifq);

	return (err);
}

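/*
 * Enqueue a packet on the interface send queue; with PF_ALTQ, the
 * packet is handed to the ALTQ discipline when one is enabled.
 */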
errno_t
ifclassq_enqueue(struct ifclassq *ifq, struct mbuf *m)
{
	errno_t err;

	IFCQ_LOCK_SPIN(ifq);

#if PF_ALTQ
	if (ALTQ_IS_ENABLED(IFCQ_ALTQ(ifq))) {
		ALTQ_ENQUEUE(IFCQ_ALTQ(ifq), m, err);
	} else {
		u_int32_t qlen = IFCQ_LEN(ifq);
		IFCQ_ENQUEUE(ifq, m, err);
		if (IFCQ_LEN(ifq) > qlen)
			ifq->ifcq_drain += (IFCQ_LEN(ifq) - qlen);
	}
#else /* !PF_ALTQ */
	IFCQ_ENQUEUE(ifq, m, err);
#endif /* PF_ALTQ */

	IFCQ_UNLOCK(ifq);

	return (err);
}

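/*
 * Dequeue up to "limit" packets from the send queue, returning them as
 * a packet chain along with the total packet count and byte length.
 * The _sc variant restricts the dequeue to a single service class;
 * both are thin wrappers around ifclassq_dequeue_common().
 */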
errno_t
ifclassq_dequeue(struct ifclassq *ifq, u_int32_t limit, struct mbuf **head,
    struct mbuf **tail, u_int32_t *cnt, u_int32_t *len)
{
	return (ifclassq_dequeue_common(ifq, MBUF_SC_UNSPEC, limit, head, tail,
	    cnt, len, FALSE));
}

errno_t
ifclassq_dequeue_sc(struct ifclassq *ifq, mbuf_svc_class_t sc,
    u_int32_t limit, struct mbuf **head, struct mbuf **tail, u_int32_t *cnt,
    u_int32_t *len)
{
	return (ifclassq_dequeue_common(ifq, sc, limit, head, tail,
	    cnt, len, TRUE));
}

static errno_t
ifclassq_dequeue_common(struct ifclassq *ifq, mbuf_svc_class_t sc,
    u_int32_t limit, struct mbuf **head, struct mbuf **tail, u_int32_t *cnt,
    u_int32_t *len, boolean_t drvmgt)
{
	struct ifnet *ifp = ifq->ifcq_ifp;
	u_int32_t i = 0, l = 0;
	struct mbuf **first, *last;
#if PF_ALTQ
	struct ifaltq *altq = IFCQ_ALTQ(ifq);
	boolean_t draining;
#endif /* PF_ALTQ */

	VERIFY(!drvmgt || MBUF_VALID_SC(sc));

	*head = NULL;
	first = &(*head);
	last = NULL;

	ifq = &ifp->if_snd;
	IFCQ_LOCK_SPIN(ifq);

	while (i < limit) {
		u_int64_t pktlen;
#if PF_ALTQ
		u_int32_t qlen;

		qlen = IFCQ_LEN(ifq);
		draining = IFCQ_IS_DRAINING(ifq);

		if (drvmgt) {
			if (IFCQ_TBR_IS_ENABLED(ifq))
				IFCQ_TBR_DEQUEUE_SC(ifq, sc, *head);
			else if (draining)
				IFCQ_DEQUEUE_SC(ifq, sc, *head);
			else if (ALTQ_IS_ENABLED(altq))
				ALTQ_DEQUEUE_SC(altq, sc, *head);
			else
				*head = NULL;
		} else {
			if (IFCQ_TBR_IS_ENABLED(ifq))
				IFCQ_TBR_DEQUEUE(ifq, *head);
			else if (draining)
				IFCQ_DEQUEUE(ifq, *head);
			else if (ALTQ_IS_ENABLED(altq))
				ALTQ_DEQUEUE(altq, *head);
			else
				*head = NULL;
		}

		if (draining && *head != NULL) {
			VERIFY(ifq->ifcq_drain >= (qlen - IFCQ_LEN(ifq)));
			ifq->ifcq_drain -= (qlen - IFCQ_LEN(ifq));
		}
#else /* ! PF_ALTQ */
		if (drvmgt) {
			if (IFCQ_TBR_IS_ENABLED(ifq))
				IFCQ_TBR_DEQUEUE_SC(ifq, sc, *head);
			else
				IFCQ_DEQUEUE_SC(ifq, sc, *head);
		} else {
			if (IFCQ_TBR_IS_ENABLED(ifq))
				IFCQ_TBR_DEQUEUE(ifq, *head);
			else
				IFCQ_DEQUEUE(ifq, *head);
		}
#endif /* !PF_ALTQ */

		if (*head == NULL)
			break;

		(*head)->m_nextpkt = NULL;
		last = *head;

		l += (*head)->m_pkthdr.len;
		pktlen = (*head)->m_pkthdr.len;

#if MEASURE_BW
		(*head)->m_pkthdr.pkt_bwseq =
		    atomic_add_64_ov(&(ifp->if_bw.cur_seq), pktlen);
#endif /* MEASURE_BW */

		head = &(*head)->m_nextpkt;
		i++;
	}

	IFCQ_UNLOCK(ifq);

	if (tail != NULL)
		*tail = last;
	if (cnt != NULL)
		*cnt = i;
	if (len != NULL)
		*len = l;

	return ((*first != NULL) ? 0 : EAGAIN);
}

struct mbuf *
ifclassq_poll(struct ifclassq *ifq)
{
	return (ifclassq_poll_common(ifq, MBUF_SC_UNSPEC, FALSE));
}

struct mbuf *
ifclassq_poll_sc(struct ifclassq *ifq, mbuf_svc_class_t sc)
{
	return (ifclassq_poll_common(ifq, sc, TRUE));
}

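/*
 * Return (without dequeueing) the packet at the head of the queue,
 * honoring the same TBR/ALTQ precedence as the dequeue path.
 */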
static struct mbuf *
ifclassq_poll_common(struct ifclassq *ifq, mbuf_svc_class_t sc,
    boolean_t drvmgt)
{
#if PF_ALTQ
	struct ifaltq *altq = IFCQ_ALTQ(ifq);
#endif /* PF_ALTQ */
	struct mbuf *m;

	VERIFY(!drvmgt || MBUF_VALID_SC(sc));

#if PF_ALTQ
	if (drvmgt) {
		if (IFCQ_TBR_IS_ENABLED(ifq))
			IFCQ_TBR_POLL_SC(ifq, sc, m);
		else if (IFCQ_IS_DRAINING(ifq))
			IFCQ_POLL_SC(ifq, sc, m);
		else if (ALTQ_IS_ENABLED(altq))
			ALTQ_POLL_SC(altq, sc, m);
		else
			m = NULL;
	} else {
		if (IFCQ_TBR_IS_ENABLED(ifq))
			IFCQ_TBR_POLL(ifq, m);
		else if (IFCQ_IS_DRAINING(ifq))
			IFCQ_POLL(ifq, m);
		else if (ALTQ_IS_ENABLED(altq))
			ALTQ_POLL(altq, m);
		else
			m = NULL;
	}
#else /* ! PF_ALTQ */
	if (drvmgt) {
		if (IFCQ_TBR_IS_ENABLED(ifq))
			IFCQ_TBR_POLL_SC(ifq, sc, m);
		else
			IFCQ_POLL_SC(ifq, sc, m);
	} else {
		if (IFCQ_TBR_IS_ENABLED(ifq))
			IFCQ_TBR_POLL(ifq, m);
		else
			IFCQ_POLL(ifq, m);
	}
#endif /* !PF_ALTQ */

	return (m);
}

void
ifclassq_update(struct ifclassq *ifq, cqev_t ev)
{
	IFCQ_LOCK_ASSERT_HELD(ifq);
	VERIFY(IFCQ_IS_READY(ifq));

#if PF_ALTQ
	if (ALTQ_IS_ENABLED(IFCQ_ALTQ(ifq)))
		ALTQ_UPDATE(IFCQ_ALTQ(ifq), ev);
#endif /* PF_ALTQ */
	IFCQ_UPDATE(ifq, ev);
}

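/*
 * Record a scheduling discipline and its callbacks on the interface
 * queue; at most one of the regular and per-service-class dequeue
 * callbacks may be supplied.
 */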
int
ifclassq_attach(struct ifclassq *ifq, u_int32_t type, void *discipline,
    ifclassq_enq_func enqueue, ifclassq_deq_func dequeue,
    ifclassq_deq_sc_func dequeue_sc, ifclassq_req_func request)
{
	IFCQ_LOCK_ASSERT_HELD(ifq);

	VERIFY(ifq->ifcq_disc == NULL);
	VERIFY(enqueue != NULL);
	VERIFY(!(dequeue != NULL && dequeue_sc != NULL));
	VERIFY(request != NULL);

	ifq->ifcq_type = type;
	ifq->ifcq_disc = discipline;
	ifq->ifcq_enqueue = enqueue;
	ifq->ifcq_dequeue = dequeue;
	ifq->ifcq_dequeue_sc = dequeue_sc;
	ifq->ifcq_request = request;

	return (0);
}

int
ifclassq_detach(struct ifclassq *ifq)
{
	IFCQ_LOCK_ASSERT_HELD(ifq);

	VERIFY(ifq->ifcq_disc == NULL);

	ifq->ifcq_type = PKTSCHEDT_NONE;
	ifq->ifcq_disc = NULL;
	ifq->ifcq_enqueue = NULL;
	ifq->ifcq_dequeue = NULL;
	ifq->ifcq_dequeue_sc = NULL;
	ifq->ifcq_request = NULL;

	return (0);
}

int
ifclassq_getqstats(struct ifclassq *ifq, u_int32_t qid, void *ubuf,
    u_int32_t *nbytes)
{
	struct if_ifclassq_stats *ifqs;
	int err;

	if (*nbytes < sizeof (*ifqs))
		return (EINVAL);

	ifqs = _MALLOC(sizeof (*ifqs), M_TEMP, M_WAITOK | M_ZERO);
	if (ifqs == NULL)
		return (ENOMEM);

	IFCQ_LOCK(ifq);
	if (!IFCQ_IS_READY(ifq)) {
		IFCQ_UNLOCK(ifq);
		_FREE(ifqs, M_TEMP);
		return (ENXIO);
	}

	ifqs->ifqs_len = IFCQ_LEN(ifq);
	ifqs->ifqs_maxlen = IFCQ_MAXLEN(ifq);
	ifqs->ifqs_xmitcnt = ifq->ifcq_xmitcnt;
	ifqs->ifqs_dropcnt = ifq->ifcq_dropcnt;
	ifqs->ifqs_scheduler = ifq->ifcq_type;

	err = pktsched_getqstats(ifq, qid, ifqs);
	IFCQ_UNLOCK(ifq);

	if (err == 0 && (err = copyout((caddr_t)ifqs,
	    (user_addr_t)(uintptr_t)ubuf, sizeof (*ifqs))) == 0)
		*nbytes = sizeof (*ifqs);

	_FREE(ifqs, M_TEMP);

	return (err);
}

const char *
ifclassq_ev2str(cqev_t ev)
{
	const char *c;

	switch (ev) {
	case CLASSQ_EV_LINK_BANDWIDTH:
		c = "LINK_BANDWIDTH";
		break;

	case CLASSQ_EV_LINK_LATENCY:
		c = "LINK_LATENCY";
		break;

	case CLASSQ_EV_LINK_MTU:
		c = "LINK_MTU";
		break;

	case CLASSQ_EV_LINK_UP:
		c = "LINK_UP";
		break;

	case CLASSQ_EV_LINK_DOWN:
		c = "LINK_DOWN";
		break;

	default:
		c = "UNKNOWN";
		break;
	}

	return (c);
}


/*
 * internal representation of token bucket parameters
 *	rate:	byte_per_unittime << 32
 *		(((bits_per_sec) / 8) << 32) / machclk_freq
 *	depth:	byte << 32
 */
#define	TBR_SHIFT	32
#define	TBR_SCALE(x)	((int64_t)(x) << TBR_SHIFT)
#define	TBR_UNSCALE(x)	((x) >> TBR_SHIFT)
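
/*
 * Worked example (illustrative only; assumes machclk_freq == 10^9,
 * i.e. a 1 GHz machine clock): a 100 Mbps profile corresponds to
 * 12,500,000 bytes/sec, so
 *
 *	tbr_rate = TBR_SCALE(12500000) / 10^9 == 53687091
 *
 * i.e. 0.0125 bytes (kept scaled by 2^32) are credited per clock
 * tick, while each dequeued packet debits TBR_SCALE(m_pktlen(m)).
 */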

struct mbuf *
ifclassq_tbr_dequeue(struct ifclassq *ifq, int op)
{
	return (ifclassq_tbr_dequeue_common(ifq, op, MBUF_SC_UNSPEC, FALSE));
}

struct mbuf *
ifclassq_tbr_dequeue_sc(struct ifclassq *ifq, int op, mbuf_svc_class_t sc)
{
	return (ifclassq_tbr_dequeue_common(ifq, op, sc, TRUE));
}

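/*
 * Dequeue (or, depending on "op", poll) a packet subject to the token
 * bucket regulator: tokens are replenished according to the elapsed
 * machine clock time, and a dequeue is permitted only while the token
 * count is positive.
 */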
static struct mbuf *
ifclassq_tbr_dequeue_common(struct ifclassq *ifq, int op,
    mbuf_svc_class_t sc, boolean_t drvmgt)
{
	struct tb_regulator *tbr;
	struct mbuf *m;
	int64_t interval;
	u_int64_t now;

	IFCQ_LOCK_ASSERT_HELD(ifq);

	VERIFY(!drvmgt || MBUF_VALID_SC(sc));
	VERIFY(IFCQ_TBR_IS_ENABLED(ifq));

	tbr = &ifq->ifcq_tbr;
	if (op == CLASSQDQ_REMOVE && tbr->tbr_lastop == CLASSQDQ_POLL) {
		/* if this is a remove after poll, bypass the TBR check */
	} else {
		/* replenish the token only when it is non-positive */
		if (tbr->tbr_token <= 0) {
			now = read_machclk();
			interval = now - tbr->tbr_last;
			if (interval >= tbr->tbr_filluptime) {
				tbr->tbr_token = tbr->tbr_depth;
			} else {
				tbr->tbr_token += interval * tbr->tbr_rate;
				if (tbr->tbr_token > tbr->tbr_depth)
					tbr->tbr_token = tbr->tbr_depth;
			}
			tbr->tbr_last = now;
		}
		/* if the token is still non-positive, don't allow dequeue */
		if (tbr->tbr_token <= 0)
			return (NULL);
	}

	/*
	 * The ifclassq queue takes precedence over the ALTQ queue;
	 * the ifcq_drain count is adjusted by the caller.
	 */
#if PF_ALTQ
	if (IFCQ_IS_DRAINING(ifq)) {
#endif /* PF_ALTQ */
		if (op == CLASSQDQ_POLL) {
			if (drvmgt)
				IFCQ_POLL_SC(ifq, sc, m);
			else
				IFCQ_POLL(ifq, m);
		} else {
			if (drvmgt)
				IFCQ_DEQUEUE_SC(ifq, sc, m);
			else
				IFCQ_DEQUEUE(ifq, m);
		}
#if PF_ALTQ
	} else {
		struct ifaltq *altq = IFCQ_ALTQ(ifq);
		if (ALTQ_IS_ENABLED(altq)) {
			if (drvmgt)
				m = (*altq->altq_dequeue_sc)(altq, sc, op);
			else
				m = (*altq->altq_dequeue)(altq, op);
		} else {
			m = NULL;
		}
	}
#endif /* PF_ALTQ */

	if (m != NULL && op == CLASSQDQ_REMOVE)
		tbr->tbr_token -= TBR_SCALE(m_pktlen(m));
	tbr->tbr_lastop = op;

	return (m);
}

/*
 * Set a token bucket regulator.
 * If the specified rate is zero, the token bucket regulator is deleted.
 */
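/*
 * Example usage (illustrative sketch; the IFCQ lock must be held by
 * the caller, as asserted below).  A 100 Mbps regulator with the
 * default depth:
 *
 *	struct tb_profile tb;
 *
 *	bzero(&tb, sizeof (tb));
 *	tb.rate = 100 * 1000 * 1000;
 *	(void) ifclassq_tbr_set(ifq, &tb, TRUE);
 *
 * Passing an all-zeroes profile removes the regulator, as done by
 * ifclassq_teardown() above.
 */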
int
ifclassq_tbr_set(struct ifclassq *ifq, struct tb_profile *profile,
    boolean_t update)
{
	struct tb_regulator *tbr;
	struct ifnet *ifp = ifq->ifcq_ifp;
	u_int64_t rate, old_rate;

	IFCQ_LOCK_ASSERT_HELD(ifq);
	VERIFY(IFCQ_IS_READY(ifq));

	VERIFY(machclk_freq != 0);

	tbr = &ifq->ifcq_tbr;
	old_rate = tbr->tbr_rate_raw;

	rate = profile->rate;
	if (profile->percent > 0) {
		u_int64_t eff_rate;

		if (profile->percent > 100)
			return (EINVAL);
		if ((eff_rate = ifp->if_output_bw.eff_bw) == 0)
			return (ENODEV);
		rate = (eff_rate * profile->percent) / 100;
	}

	if (rate == 0) {
		if (!IFCQ_TBR_IS_ENABLED(ifq))
			return (ENOENT);

		if (pktsched_verbose)
			printf("%s: TBR disabled\n", if_name(ifp));

		/* disable this TBR */
		ifq->ifcq_flags &= ~IFCQF_TBR;
		bzero(tbr, sizeof (*tbr));
		ifnet_set_start_cycle(ifp, NULL);
		if (update)
			ifclassq_update(ifq, CLASSQ_EV_LINK_BANDWIDTH);
		return (0);
	}

	if (pktsched_verbose) {
		printf("%s: TBR %s (rate %llu bps depth %u)\n", if_name(ifp),
		    (ifq->ifcq_flags & IFCQF_TBR) ? "reconfigured" :
		    "enabled", rate, profile->depth);
	}

	/* set the new TBR */
	bzero(tbr, sizeof (*tbr));
	tbr->tbr_rate_raw = rate;
	tbr->tbr_percent = profile->percent;
	ifq->ifcq_flags |= IFCQF_TBR;

	/*
	 * Note that the TBR fill up time (hence the ifnet restart time)
	 * is directly related to the specified TBR depth.  The ideal
	 * depth value should be computed such that the interval time
	 * between each successive wakeup is adequately spaced apart,
	 * in order to reduce scheduling overheads.  A target interval
	 * of 10 ms seems to provide good performance balance.  This can be
	 * overridden by specifying the depth profile.  Values smaller than
	 * the ideal depth will reduce delay at the expense of CPU cycles.
	 */
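	/*
	 * Illustrative numbers (not derived from the code): with a
	 * 1500-byte MTU and a 100 Mbps rate (12,500,000 bytes/sec),
	 * about 125,000 bytes are transmitted per 10 ms interval, so
	 * the loop below settles on i == 84 (84 * 1500 == 126,000
	 * bytes, just over 10 ms worth of traffic), and the fudged
	 * default depth becomes 141,750 bytes.
	 */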
	tbr->tbr_rate = TBR_SCALE(rate / 8) / machclk_freq;
	if (tbr->tbr_rate > 0) {
		u_int32_t mtu = ifp->if_mtu;
		int64_t ival, idepth = 0;
		int i;

		if (mtu < IF_MINMTU)
			mtu = IF_MINMTU;

		ival = pktsched_nsecs_to_abstime(10 * NSEC_PER_MSEC); /* 10ms */

		for (i = 1; ; i++) {
			idepth = TBR_SCALE(i * mtu);
			if ((idepth / tbr->tbr_rate) > ival)
				break;
		}
		VERIFY(idepth > 0);

		tbr->tbr_depth = TBR_SCALE(profile->depth);
		if (tbr->tbr_depth == 0) {
			tbr->tbr_filluptime = idepth / tbr->tbr_rate;
			/* a little fudge factor to get closer to rate */
			tbr->tbr_depth = idepth + (idepth >> 3);
		} else {
			tbr->tbr_filluptime = tbr->tbr_depth / tbr->tbr_rate;
		}
	} else {
		tbr->tbr_depth = TBR_SCALE(profile->depth);
		tbr->tbr_filluptime = 0xffffffffffffffffLL;
	}
	tbr->tbr_token = tbr->tbr_depth;
	tbr->tbr_last = read_machclk();
	tbr->tbr_lastop = CLASSQDQ_REMOVE;

	if (tbr->tbr_rate > 0 && (ifp->if_flags & IFF_UP)) {
		struct timespec ts =
		    { 0, pktsched_abs_to_nsecs(tbr->tbr_filluptime) };
		if (pktsched_verbose) {
			printf("%s: TBR calculated tokens %lld "
			    "filluptime %llu ns\n", if_name(ifp),
			    TBR_UNSCALE(tbr->tbr_token),
			    pktsched_abs_to_nsecs(tbr->tbr_filluptime));
		}
		ifnet_set_start_cycle(ifp, &ts);
	} else {
		if (pktsched_verbose) {
			if (tbr->tbr_rate == 0) {
				printf("%s: TBR calculated tokens %lld "
				    "infinite filluptime\n", if_name(ifp),
				    TBR_UNSCALE(tbr->tbr_token));
			} else if (!(ifp->if_flags & IFF_UP)) {
				printf("%s: TBR suspended (link is down)\n",
				    if_name(ifp));
			}
		}
		ifnet_set_start_cycle(ifp, NULL);
	}
	if (update && tbr->tbr_rate_raw != old_rate)
		ifclassq_update(ifq, CLASSQ_EV_LINK_BANDWIDTH);

	return (0);
}