/*	$NetBSD: pfctl_altq.c,v 1.5.2.1 2006/03/18 18:44:20 peter Exp $	*/
/*	$OpenBSD: pfctl_altq.c,v 1.86 2005/02/28 14:04:51 henning Exp $	*/

/*
 * Copyright (c) 2002
 *	Sony Computer Science Laboratories Inc.
 * Copyright (c) 2002, 2003 Henning Brauer <henning@openbsd.org>
 *
 * Permission to use, copy, modify, and distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

#include <sys/types.h>
#include <sys/ioctl.h>
#include <sys/socket.h>
#ifdef __NetBSD__
#include <sys/param.h>
#include <sys/mbuf.h>
#endif

#include <net/if.h>
#include <netinet/in.h>
#include <net/pfvar.h>

#include <err.h>
#include <errno.h>
#include <limits.h>
#include <math.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>

#include <altq/altq.h>
#include <altq/altq_cbq.h>
#include <altq/altq_priq.h>
#include <altq/altq_hfsc.h>

#include "pfctl_parser.h"
#include "pfctl.h"

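/* a service curve is considered unused if it is missing or both slopes are zero */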
#define is_sc_null(sc)	(((sc) == NULL) || ((sc)->m1 == 0 && (sc)->m2 == 0))

TAILQ_HEAD(altqs, pf_altq) altqs = TAILQ_HEAD_INITIALIZER(altqs);
LIST_HEAD(gen_sc, segment) rtsc, lssc;

struct pf_altq	*qname_to_pfaltq(const char *, const char *);
u_int32_t	 qname_to_qid(const char *);

static int	eval_pfqueue_cbq(struct pfctl *, struct pf_altq *);
static int	cbq_compute_idletime(struct pfctl *, struct pf_altq *);
static int	check_commit_cbq(int, int, struct pf_altq *);
static int	print_cbq_opts(const struct pf_altq *);

static int	eval_pfqueue_priq(struct pfctl *, struct pf_altq *);
static int	check_commit_priq(int, int, struct pf_altq *);
static int	print_priq_opts(const struct pf_altq *);

static int	eval_pfqueue_hfsc(struct pfctl *, struct pf_altq *);
static int	check_commit_hfsc(int, int, struct pf_altq *);
static int	print_hfsc_opts(const struct pf_altq *,
		    const struct node_queue_opt *);

static void		 gsc_add_sc(struct gen_sc *, struct service_curve *);
static int		 is_gsc_under_sc(struct gen_sc *,
			     struct service_curve *);
static void		 gsc_destroy(struct gen_sc *);
static struct segment	*gsc_getentry(struct gen_sc *, double);
static int		 gsc_add_seg(struct gen_sc *, double, double, double,
			     double);
static double		 sc_x2y(struct service_curve *, double);

u_int32_t	 getifspeed(char *);
u_long		 getifmtu(char *);
int		 eval_queue_opts(struct pf_altq *, struct node_queue_opt *,
		     u_int32_t);
u_int32_t	 eval_bwspec(struct node_queue_bw *, u_int32_t);
void		 print_hfsc_sc(const char *, u_int, u_int, u_int,
		     const struct node_hfsc_sc *);

void
pfaltq_store(struct pf_altq *a)
{
	struct pf_altq	*altq;

	if ((altq = malloc(sizeof(*altq))) == NULL)
		err(1, "malloc");
	memcpy(altq, a, sizeof(struct pf_altq));
	TAILQ_INSERT_TAIL(&altqs, altq, entries);
}

void
pfaltq_free(struct pf_altq *a)
{
	struct pf_altq	*altq;

	TAILQ_FOREACH(altq, &altqs, entries) {
		if (strncmp(a->ifname, altq->ifname, IFNAMSIZ) == 0 &&
		    strncmp(a->qname, altq->qname, PF_QNAME_SIZE) == 0) {
			TAILQ_REMOVE(&altqs, altq, entries);
			free(altq);
			return;
		}
	}
}

struct pf_altq *
pfaltq_lookup(const char *ifname)
{
	struct pf_altq	*altq;

	TAILQ_FOREACH(altq, &altqs, entries) {
		if (strncmp(ifname, altq->ifname, IFNAMSIZ) == 0 &&
		    altq->qname[0] == 0)
			return (altq);
	}
	return (NULL);
}

struct pf_altq *
qname_to_pfaltq(const char *qname, const char *ifname)
{
	struct pf_altq	*altq;

	TAILQ_FOREACH(altq, &altqs, entries) {
		if (strncmp(ifname, altq->ifname, IFNAMSIZ) == 0 &&
		    strncmp(qname, altq->qname, PF_QNAME_SIZE) == 0)
			return (altq);
	}
	return (NULL);
}

u_int32_t
qname_to_qid(const char *qname)
{
	struct pf_altq	*altq;

	/*
	 * We guarantee that same named queues on different interfaces
	 * have the same qid, so we do NOT need to limit matching on
	 * one interface!
	 */

	TAILQ_FOREACH(altq, &altqs, entries) {
		if (strncmp(qname, altq->qname, PF_QNAME_SIZE) == 0)
			return (altq->qid);
	}
	return (0);
}

void
print_altq(const struct pf_altq *a, unsigned level, struct node_queue_bw *bw,
	struct node_queue_opt *qopts)
{
	if (a->qname[0] != 0) {
		print_queue(a, level, bw, 0, qopts);
		return;
	}

	printf("altq on %s ", a->ifname);

	switch (a->scheduler) {
	case ALTQT_CBQ:
		if (!print_cbq_opts(a))
			printf("cbq ");
		break;
	case ALTQT_PRIQ:
		if (!print_priq_opts(a))
			printf("priq ");
		break;
	case ALTQT_HFSC:
		if (!print_hfsc_opts(a, qopts))
			printf("hfsc ");
		break;
	}

	if (bw != NULL && bw->bw_percent > 0) {
		if (bw->bw_percent < 100)
			printf("bandwidth %u%% ", bw->bw_percent);
	} else
		printf("bandwidth %s ", rate2str((double)a->ifbandwidth));

	if (a->qlimit != DEFAULT_QLIMIT)
		printf("qlimit %u ", a->qlimit);
	printf("tbrsize %u ", a->tbrsize);
}

void
print_queue(const struct pf_altq *a, unsigned level, struct node_queue_bw *bw,
    int print_interface, struct node_queue_opt *qopts)
{
	unsigned	i;

	printf("queue ");
	for (i = 0; i < level; ++i)
		printf(" ");
	printf("%s ", a->qname);
	if (print_interface)
		printf("on %s ", a->ifname);
	if (a->scheduler == ALTQT_CBQ || a->scheduler == ALTQT_HFSC) {
		if (bw != NULL && bw->bw_percent > 0) {
			if (bw->bw_percent < 100)
				printf("bandwidth %u%% ", bw->bw_percent);
		} else
			printf("bandwidth %s ", rate2str((double)a->bandwidth));
	}
	if (a->priority != DEFAULT_PRIORITY)
		printf("priority %u ", a->priority);
	if (a->qlimit != DEFAULT_QLIMIT)
		printf("qlimit %u ", a->qlimit);
	switch (a->scheduler) {
	case ALTQT_CBQ:
		print_cbq_opts(a);
		break;
	case ALTQT_PRIQ:
		print_priq_opts(a);
		break;
	case ALTQT_HFSC:
		print_hfsc_opts(a, qopts);
		break;
	}
}

/*
 * eval_pfaltq computes the discipline parameters.
 */
int
eval_pfaltq(struct pfctl *pf, struct pf_altq *pa, struct node_queue_bw *bw,
    struct node_queue_opt *opts)
{
	u_int	rate, size, errors = 0;

	if (bw->bw_absolute > 0)
		pa->ifbandwidth = bw->bw_absolute;
	else
		if ((rate = getifspeed(pa->ifname)) == 0) {
			fprintf(stderr, "cannot determine interface bandwidth "
			    "for %s, specify an absolute bandwidth\n",
			    pa->ifname);
			errors++;
		} else if ((pa->ifbandwidth = eval_bwspec(bw, rate)) == 0)
			pa->ifbandwidth = rate;

	errors += eval_queue_opts(pa, opts, pa->ifbandwidth);

	/* if tbrsize is not specified, use heuristics */
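	/*
	 * Illustrative example (not part of the original heuristics table):
	 * on a 100Mbps interface with a 1500 byte MTU, the rate falls into
	 * the 10M-200M bucket, so tbrsize becomes 8 * 1500 = 12000 bytes,
	 * well below the 0xffff cap.
	 */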
	if (pa->tbrsize == 0) {
		rate = pa->ifbandwidth;
		if (rate <= 1 * 1000 * 1000)
			size = 1;
		else if (rate <= 10 * 1000 * 1000)
			size = 4;
		else if (rate <= 200 * 1000 * 1000)
			size = 8;
		else
			size = 24;
		size = size * getifmtu(pa->ifname);
		if (size > 0xffff)
			size = 0xffff;
		pa->tbrsize = size;
	}
	return (errors);
}

/*
 * check_commit_altq does consistency check for each interface
 */
int
check_commit_altq(int dev, int opts)
{
	struct pf_altq	*altq;
	int		 error = 0;

	/* call the discipline check for each interface. */
	TAILQ_FOREACH(altq, &altqs, entries) {
		if (altq->qname[0] == 0) {
			switch (altq->scheduler) {
			case ALTQT_CBQ:
				error = check_commit_cbq(dev, opts, altq);
				break;
			case ALTQT_PRIQ:
				error = check_commit_priq(dev, opts, altq);
				break;
			case ALTQT_HFSC:
				error = check_commit_hfsc(dev, opts, altq);
				break;
			default:
				break;
			}
		}
	}
	return (error);
}

/*
 * eval_pfqueue computes the queue parameters.
 */
int
eval_pfqueue(struct pfctl *pf, struct pf_altq *pa, struct node_queue_bw *bw,
    struct node_queue_opt *opts)
{
	/* should be merged with expand_queue */
	struct pf_altq	*if_pa, *parent, *altq;
	u_int32_t	 bwsum;
	int		 error = 0;

	/* find the corresponding interface and copy fields used by queues */
	if ((if_pa = pfaltq_lookup(pa->ifname)) == NULL) {
		fprintf(stderr, "altq not defined on %s\n", pa->ifname);
		return (1);
	}
	pa->scheduler = if_pa->scheduler;
	pa->ifbandwidth = if_pa->ifbandwidth;

	if (qname_to_pfaltq(pa->qname, pa->ifname) != NULL) {
		fprintf(stderr, "queue %s already exists on interface %s\n",
		    pa->qname, pa->ifname);
		return (1);
	}
	pa->qid = qname_to_qid(pa->qname);

	parent = NULL;
	if (pa->parent[0] != 0) {
		parent = qname_to_pfaltq(pa->parent, pa->ifname);
		if (parent == NULL) {
			fprintf(stderr, "parent %s not found for %s\n",
			    pa->parent, pa->qname);
			return (1);
		}
		pa->parent_qid = parent->qid;
	}
	if (pa->qlimit == 0)
		pa->qlimit = DEFAULT_QLIMIT;

	if (pa->scheduler == ALTQT_CBQ || pa->scheduler == ALTQT_HFSC) {
		pa->bandwidth = eval_bwspec(bw,
		    parent == NULL ? 0 : parent->bandwidth);

		if (pa->bandwidth > pa->ifbandwidth) {
			fprintf(stderr, "bandwidth for %s higher than "
			    "interface\n", pa->qname);
			return (1);
		}
		/* check the sum of the child bandwidth is under parent's */
		if (parent != NULL) {
			if (pa->bandwidth > parent->bandwidth) {
				warnx("bandwidth for %s higher than parent",
				    pa->qname);
				return (1);
			}
			bwsum = 0;
			TAILQ_FOREACH(altq, &altqs, entries) {
				if (strncmp(altq->ifname, pa->ifname,
				    IFNAMSIZ) == 0 &&
				    altq->qname[0] != 0 &&
				    strncmp(altq->parent, pa->parent,
				    PF_QNAME_SIZE) == 0)
					bwsum += altq->bandwidth;
			}
			bwsum += pa->bandwidth;
			if (bwsum > parent->bandwidth) {
				warnx("the sum of the child bandwidth higher"
				    " than parent \"%s\"", parent->qname);
			}
		}
	}

	if (eval_queue_opts(pa, opts, parent == NULL? 0 : parent->bandwidth))
		return (1);

	switch (pa->scheduler) {
	case ALTQT_CBQ:
		error = eval_pfqueue_cbq(pf, pa);
		break;
	case ALTQT_PRIQ:
		error = eval_pfqueue_priq(pf, pa);
		break;
	case ALTQT_HFSC:
		error = eval_pfqueue_hfsc(pf, pa);
		break;
	default:
		break;
	}
	return (error);
}

/*
 * CBQ support functions
 */
#define	RM_FILTER_GAIN	5	/* log2 of gain, e.g., 5 => 31/32 */
#define	RM_NS_PER_SEC	(1000000000)

static int
eval_pfqueue_cbq(struct pfctl *pf, struct pf_altq *pa)
{
	struct cbq_opts	*opts;
	u_int		 ifmtu;

	if (pa->priority >= CBQ_MAXPRI) {
		warnx("priority out of range: max %d", CBQ_MAXPRI - 1);
		return (-1);
	}

	ifmtu = getifmtu(pa->ifname);
	opts = &pa->pq_u.cbq_opts;

	if (opts->pktsize == 0) {	/* use default */
		opts->pktsize = ifmtu;
		if (opts->pktsize > MCLBYTES)	/* do what TCP does */
			opts->pktsize &= ~MCLBYTES;
	} else if (opts->pktsize > ifmtu)
		opts->pktsize = ifmtu;
	if (opts->maxpktsize == 0)	/* use default */
		opts->maxpktsize = ifmtu;
	else if (opts->maxpktsize > ifmtu)
		opts->maxpktsize = ifmtu;

	if (opts->pktsize > opts->maxpktsize)
		opts->pktsize = opts->maxpktsize;

	if (pa->parent[0] == 0)
		opts->flags |= (CBQCLF_ROOTCLASS | CBQCLF_WRR);

	cbq_compute_idletime(pf, pa);
	return (0);
}

/*
 * compute ns_per_byte, maxidle, minidle, and offtime
 */
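/*
 * A rough sketch of the computation below, as read from the code rather
 * than from the CBQ papers: f is this queue's fraction of the interface
 * bandwidth, nsPerByte = ifnsPerByte / f is the class transmission time
 * per byte, and maxidle/minidle/offtime are derived from the RM filter
 * gain g = 1 - 1/2^RM_FILTER_GAIN together with the min/max burst sizes,
 * then scaled by 2^RM_FILTER_GAIN / nsPerByte and divided by 1000 before
 * being handed to the kernel.
 */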
static int
cbq_compute_idletime(struct pfctl *pf, struct pf_altq *pa)
{
	struct cbq_opts	*opts;
	double		 maxidle_s, maxidle, minidle;
	double		 offtime, nsPerByte, ifnsPerByte, ptime, cptime;
	double		 z, g, f, gton, gtom;
	u_int		 minburst, maxburst;

	opts = &pa->pq_u.cbq_opts;
	ifnsPerByte = (1.0 / (double)pa->ifbandwidth) * RM_NS_PER_SEC * 8;
	minburst = opts->minburst;
	maxburst = opts->maxburst;

	if (pa->bandwidth == 0)
		f = 0.0001;	/* small enough? */
	else
		f = ((double) pa->bandwidth / (double) pa->ifbandwidth);

	nsPerByte = ifnsPerByte / f;
	ptime = (double)opts->pktsize * ifnsPerByte;
	cptime = ptime * (1.0 - f) / f;

	if (nsPerByte * (double)opts->maxpktsize > (double)INT_MAX) {
		/*
		 * this causes integer overflow in kernel!
		 * (bandwidth < 6Kbps when max_pkt_size=1500)
		 */
		if (pa->bandwidth != 0 && (pf->opts & PF_OPT_QUIET) == 0) {
			warnx("queue bandwidth must be larger than %s",
			    rate2str(ifnsPerByte * (double)opts->maxpktsize /
			    (double)INT_MAX * (double)pa->ifbandwidth));
			fprintf(stderr, "cbq: queue %s is too slow!\n",
			    pa->qname);
		}
		nsPerByte = (double)(INT_MAX / opts->maxpktsize);
	}

	if (maxburst == 0) {  /* use default */
		if (cptime > 10.0 * 1000000)
			maxburst = 4;
		else
			maxburst = 16;
	}
	if (minburst == 0)  /* use default */
		minburst = 2;
	if (minburst > maxburst)
		minburst = maxburst;

	z = (double)(1 << RM_FILTER_GAIN);
	g = (1.0 - 1.0 / z);
	gton = pow(g, (double)maxburst);
	gtom = pow(g, (double)(minburst-1));
	maxidle = ((1.0 / f - 1.0) * ((1.0 - gton) / gton));
	maxidle_s = (1.0 - g);
	if (maxidle > maxidle_s)
		maxidle = ptime * maxidle;
	else
		maxidle = ptime * maxidle_s;
	if (minburst)
		offtime = cptime * (1.0 + 1.0/(1.0 - g) * (1.0 - gtom) / gtom);
	else
		offtime = cptime;
	minidle = -((double)opts->maxpktsize * (double)nsPerByte);

	/* scale parameters */
	maxidle = ((maxidle * 8.0) / nsPerByte) *
	    pow(2.0, (double)RM_FILTER_GAIN);
	offtime = (offtime * 8.0) / nsPerByte *
	    pow(2.0, (double)RM_FILTER_GAIN);
	minidle = ((minidle * 8.0) / nsPerByte) *
	    pow(2.0, (double)RM_FILTER_GAIN);

	maxidle = maxidle / 1000.0;
	offtime = offtime / 1000.0;
	minidle = minidle / 1000.0;

	opts->minburst = minburst;
	opts->maxburst = maxburst;
	opts->ns_per_byte = (u_int)nsPerByte;
	opts->maxidle = (u_int)fabs(maxidle);
	opts->minidle = (int)minidle;
	opts->offtime = (u_int)fabs(offtime);

	return (0);
}

static int
check_commit_cbq(int dev, int opts, struct pf_altq *pa)
{
	struct pf_altq	*altq;
	int		 root_class, default_class;
	int		 error = 0;

	/*
	 * check if cbq has one root queue and one default queue
	 * for this interface
	 */
	root_class = default_class = 0;
	TAILQ_FOREACH(altq, &altqs, entries) {
		if (strncmp(altq->ifname, pa->ifname, IFNAMSIZ) != 0)
			continue;
		if (altq->qname[0] == 0)  /* this is for interface */
			continue;
		if (altq->pq_u.cbq_opts.flags & CBQCLF_ROOTCLASS)
			root_class++;
		if (altq->pq_u.cbq_opts.flags & CBQCLF_DEFCLASS)
			default_class++;
	}
	if (root_class != 1) {
		warnx("should have one root queue on %s", pa->ifname);
		error++;
	}
	if (default_class != 1) {
		warnx("should have one default queue on %s", pa->ifname);
		error++;
	}
	return (error);
}

static int
print_cbq_opts(const struct pf_altq *a)
{
	const struct cbq_opts	*opts;

	opts = &a->pq_u.cbq_opts;
	if (opts->flags) {
		printf("cbq(");
		if (opts->flags & CBQCLF_RED)
			printf(" red");
		if (opts->flags & CBQCLF_ECN)
			printf(" ecn");
		if (opts->flags & CBQCLF_RIO)
			printf(" rio");
		if (opts->flags & CBQCLF_CLEARDSCP)
			printf(" cleardscp");
		if (opts->flags & CBQCLF_FLOWVALVE)
			printf(" flowvalve");
#ifdef CBQCLF_BORROW
		if (opts->flags & CBQCLF_BORROW)
			printf(" borrow");
#endif
		if (opts->flags & CBQCLF_WRR)
			printf(" wrr");
		if (opts->flags & CBQCLF_EFFICIENT)
			printf(" efficient");
		if (opts->flags & CBQCLF_ROOTCLASS)
			printf(" root");
		if (opts->flags & CBQCLF_DEFCLASS)
			printf(" default");
		printf(" ) ");

		return (1);
	} else
		return (0);
}

/*
 * PRIQ support functions
 */
static int
eval_pfqueue_priq(struct pfctl *pf, struct pf_altq *pa)
{
	struct pf_altq	*altq;

	if (pa->priority >= PRIQ_MAXPRI) {
		warnx("priority out of range: max %d", PRIQ_MAXPRI - 1);
		return (-1);
	}
	/* the priority should be unique for the interface */
	TAILQ_FOREACH(altq, &altqs, entries) {
		if (strncmp(altq->ifname, pa->ifname, IFNAMSIZ) == 0 &&
		    altq->qname[0] != 0 && altq->priority == pa->priority) {
			warnx("%s and %s have the same priority",
			    altq->qname, pa->qname);
			return (-1);
		}
	}

	return (0);
}

static int
check_commit_priq(int dev, int opts, struct pf_altq *pa)
{
	struct pf_altq	*altq;
	int		 default_class;
	int		 error = 0;

	/*
	 * check if priq has one default class for this interface
	 */
	default_class = 0;
	TAILQ_FOREACH(altq, &altqs, entries) {
		if (strncmp(altq->ifname, pa->ifname, IFNAMSIZ) != 0)
			continue;
		if (altq->qname[0] == 0)  /* this is for interface */
			continue;
		if (altq->pq_u.priq_opts.flags & PRCF_DEFAULTCLASS)
			default_class++;
	}
	if (default_class != 1) {
		warnx("should have one default queue on %s", pa->ifname);
		error++;
	}
	return (error);
}

static int
print_priq_opts(const struct pf_altq *a)
{
	const struct priq_opts	*opts;

	opts = &a->pq_u.priq_opts;

	if (opts->flags) {
		printf("priq(");
		if (opts->flags & PRCF_RED)
			printf(" red");
		if (opts->flags & PRCF_ECN)
			printf(" ecn");
		if (opts->flags & PRCF_RIO)
			printf(" rio");
		if (opts->flags & PRCF_CLEARDSCP)
			printf(" cleardscp");
		if (opts->flags & PRCF_DEFAULTCLASS)
			printf(" default");
		printf(" ) ");

		return (1);
	} else
		return (0);
}

/*
 * HFSC support functions
 */
static int
eval_pfqueue_hfsc(struct pfctl *pf, struct pf_altq *pa)
{
	struct pf_altq		*altq, *parent;
	struct hfsc_opts	*opts;
	struct service_curve	 sc;

	opts = &pa->pq_u.hfsc_opts;

	if (pa->parent[0] == 0) {
		/* root queue */
		opts->lssc_m1 = pa->ifbandwidth;
		opts->lssc_m2 = pa->ifbandwidth;
		opts->lssc_d = 0;
		return (0);
	}

	LIST_INIT(&rtsc);
	LIST_INIT(&lssc);

	/* if link_share is not specified, use bandwidth */
	if (opts->lssc_m2 == 0)
		opts->lssc_m2 = pa->bandwidth;

	if ((opts->rtsc_m1 > 0 && opts->rtsc_m2 == 0) ||
	    (opts->lssc_m1 > 0 && opts->lssc_m2 == 0) ||
	    (opts->ulsc_m1 > 0 && opts->ulsc_m2 == 0)) {
		warnx("m2 is zero for %s", pa->qname);
		return (-1);
	}

	if ((opts->rtsc_m1 < opts->rtsc_m2 && opts->rtsc_m1 != 0) ||
	    (opts->lssc_m1 < opts->lssc_m2 && opts->lssc_m1 != 0) ||
	    (opts->ulsc_m1 < opts->ulsc_m2 && opts->ulsc_m1 != 0)) {
		warnx("m1 must be zero for convex curve: %s", pa->qname);
		return (-1);
	}

	/*
	 * admission control:
	 * for the real-time service curve, the sum of the service curves
	 * should not exceed 80% of the interface bandwidth.  20% is reserved
	 * not to over-commit the actual interface bandwidth.
	 * for the linkshare service curve, the sum of the child service
	 * curves should not exceed the parent service curve.
	 * for the upper-limit service curve, the assigned bandwidth should
	 * be smaller than the interface bandwidth, and the upper-limit should
	 * be larger than the real-time service curve when both are defined.
	 */
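	/*
	 * Illustrative numbers (not from the original sources): on a 100Mb
	 * interface the summed real-time m2 values of all queues must stay
	 * under 80Mb, so a third queue asking for 30Mb of real-time service
	 * on top of two existing 30Mb queues would be rejected here, while
	 * the same queues could still share the link through linkshare
	 * curves as long as their sum stays under their parent's curve.
	 */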
	parent = qname_to_pfaltq(pa->parent, pa->ifname);
	if (parent == NULL)
		errx(1, "parent %s not found for %s", pa->parent, pa->qname);

	TAILQ_FOREACH(altq, &altqs, entries) {
		if (strncmp(altq->ifname, pa->ifname, IFNAMSIZ) != 0)
			continue;
		if (altq->qname[0] == 0)  /* this is for interface */
			continue;

		/* if the class has a real-time service curve, add it. */
		if (opts->rtsc_m2 != 0 && altq->pq_u.hfsc_opts.rtsc_m2 != 0) {
			sc.m1 = altq->pq_u.hfsc_opts.rtsc_m1;
			sc.d = altq->pq_u.hfsc_opts.rtsc_d;
			sc.m2 = altq->pq_u.hfsc_opts.rtsc_m2;
			gsc_add_sc(&rtsc, &sc);
		}

		if (strncmp(altq->parent, pa->parent, PF_QNAME_SIZE) != 0)
			continue;

		/* if the class has a linkshare service curve, add it. */
		if (opts->lssc_m2 != 0 && altq->pq_u.hfsc_opts.lssc_m2 != 0) {
			sc.m1 = altq->pq_u.hfsc_opts.lssc_m1;
			sc.d = altq->pq_u.hfsc_opts.lssc_d;
			sc.m2 = altq->pq_u.hfsc_opts.lssc_m2;
			gsc_add_sc(&lssc, &sc);
		}
	}

	/* check the real-time service curve.  reserve 20% of interface bw */
	if (opts->rtsc_m2 != 0) {
		/* add this queue to the sum */
		sc.m1 = opts->rtsc_m1;
		sc.d = opts->rtsc_d;
		sc.m2 = opts->rtsc_m2;
		gsc_add_sc(&rtsc, &sc);
		/* compare the sum with 80% of the interface */
		sc.m1 = 0;
		sc.d = 0;
		sc.m2 = pa->ifbandwidth / 100 * 80;
		if (!is_gsc_under_sc(&rtsc, &sc)) {
			warnx("real-time sc exceeds 80%% of the interface "
			    "bandwidth (%s)", rate2str((double)sc.m2));
			goto err_ret;
		}
	}

	/* check the linkshare service curve. */
	if (opts->lssc_m2 != 0) {
		/* add this queue to the child sum */
		sc.m1 = opts->lssc_m1;
		sc.d = opts->lssc_d;
		sc.m2 = opts->lssc_m2;
		gsc_add_sc(&lssc, &sc);
		/* compare the sum of the children with parent's sc */
		sc.m1 = parent->pq_u.hfsc_opts.lssc_m1;
		sc.d = parent->pq_u.hfsc_opts.lssc_d;
		sc.m2 = parent->pq_u.hfsc_opts.lssc_m2;
		if (!is_gsc_under_sc(&lssc, &sc)) {
			warnx("linkshare sc exceeds parent's sc");
			goto err_ret;
		}
	}

	/* check the upper-limit service curve. */
	if (opts->ulsc_m2 != 0) {
		if (opts->ulsc_m1 > pa->ifbandwidth ||
		    opts->ulsc_m2 > pa->ifbandwidth) {
			warnx("upper-limit larger than interface bandwidth");
			goto err_ret;
		}
		if (opts->rtsc_m2 != 0 && opts->rtsc_m2 > opts->ulsc_m2) {
			warnx("upper-limit sc smaller than real-time sc");
			goto err_ret;
		}
	}

	gsc_destroy(&rtsc);
	gsc_destroy(&lssc);

	return (0);

err_ret:
	gsc_destroy(&rtsc);
	gsc_destroy(&lssc);
	return (-1);
}

static int
check_commit_hfsc(int dev, int opts, struct pf_altq *pa)
{
	struct pf_altq	*altq, *def = NULL;
	int		 default_class;
	int		 error = 0;

	/* check if hfsc has one default queue for this interface */
	default_class = 0;
	TAILQ_FOREACH(altq, &altqs, entries) {
		if (strncmp(altq->ifname, pa->ifname, IFNAMSIZ) != 0)
			continue;
		if (altq->qname[0] == 0)  /* this is for interface */
			continue;
		if (altq->parent[0] == 0)  /* dummy root */
			continue;
		if (altq->pq_u.hfsc_opts.flags & HFCF_DEFAULTCLASS) {
			default_class++;
			def = altq;
		}
	}
	if (default_class != 1) {
		warnx("should have one default queue on %s", pa->ifname);
		return (1);
	}
	/* make sure the default queue is a leaf */
	TAILQ_FOREACH(altq, &altqs, entries) {
		if (strncmp(altq->ifname, pa->ifname, IFNAMSIZ) != 0)
			continue;
		if (altq->qname[0] == 0)  /* this is for interface */
			continue;
		if (strncmp(altq->parent, def->qname, PF_QNAME_SIZE) == 0) {
			warnx("default queue is not a leaf");
			error++;
		}
	}
	return (error);
}

static int
print_hfsc_opts(const struct pf_altq *a, const struct node_queue_opt *qopts)
{
	const struct hfsc_opts		*opts;
	const struct node_hfsc_sc	*rtsc, *lssc, *ulsc;

	opts = &a->pq_u.hfsc_opts;
	if (qopts == NULL)
		rtsc = lssc = ulsc = NULL;
	else {
		rtsc = &qopts->data.hfsc_opts.realtime;
		lssc = &qopts->data.hfsc_opts.linkshare;
		ulsc = &qopts->data.hfsc_opts.upperlimit;
	}

	if (opts->flags || opts->rtsc_m2 != 0 || opts->ulsc_m2 != 0 ||
	    (opts->lssc_m2 != 0 && (opts->lssc_m2 != a->bandwidth ||
	    opts->lssc_d != 0))) {
		printf("hfsc(");
		if (opts->flags & HFCF_RED)
			printf(" red");
		if (opts->flags & HFCF_ECN)
			printf(" ecn");
		if (opts->flags & HFCF_RIO)
			printf(" rio");
		if (opts->flags & HFCF_CLEARDSCP)
			printf(" cleardscp");
		if (opts->flags & HFCF_DEFAULTCLASS)
			printf(" default");
		if (opts->rtsc_m2 != 0)
			print_hfsc_sc("realtime", opts->rtsc_m1, opts->rtsc_d,
			    opts->rtsc_m2, rtsc);
		if (opts->lssc_m2 != 0 && (opts->lssc_m2 != a->bandwidth ||
		    opts->lssc_d != 0))
			print_hfsc_sc("linkshare", opts->lssc_m1, opts->lssc_d,
			    opts->lssc_m2, lssc);
		if (opts->ulsc_m2 != 0)
			print_hfsc_sc("upperlimit", opts->ulsc_m1, opts->ulsc_d,
			    opts->ulsc_m2, ulsc);
		printf(" ) ");

		return (1);
	} else
		return (0);
}

/*
 * admission control using generalized service curve
 */
#ifdef __OpenBSD__
#define	INFINITY	HUGE_VAL  /* positive infinity defined in <math.h> */
#endif

/* add a new service curve to a generalized service curve */
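/*
 * A two-piece curve (m1, d, m2) is folded into the generalized curve as
 * two segments: slope m1 over [0, d) and slope m2 over [d, infinity), so
 * adding curves amounts to summing slopes per segment (see gsc_add_seg).
 */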
static void
gsc_add_sc(struct gen_sc *gsc, struct service_curve *sc)
{
	if (is_sc_null(sc))
		return;
	if (sc->d != 0)
		gsc_add_seg(gsc, 0.0, 0.0, (double)sc->d, (double)sc->m1);
	gsc_add_seg(gsc, (double)sc->d, 0.0, INFINITY, (double)sc->m2);
}

/*
 * check whether all points of a generalized service curve have
 * their y-coordinates no larger than a given two-piece linear
 * service curve.
 */
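/*
 * The check walks the accumulated segments up to the trailing dummy entry,
 * compares each breakpoint against sc_x2y(), and then makes sure the last
 * real segment's slope cannot cross the curve beyond those breakpoints.
 */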
static int
is_gsc_under_sc(struct gen_sc *gsc, struct service_curve *sc)
{
	struct segment	*s, *last, *end;
	double		 y;

	if (is_sc_null(sc)) {
		if (LIST_EMPTY(gsc))
			return (1);
		LIST_FOREACH(s, gsc, _next) {
			if (s->m != 0)
				return (0);
		}
		return (1);
	}
	/*
	 * gsc has a dummy entry at the end with x = INFINITY.
	 * loop through up to this dummy entry.
	 */
	end = gsc_getentry(gsc, INFINITY);
	if (end == NULL)
		return (1);
	last = NULL;
	for (s = LIST_FIRST(gsc); s != end; s = LIST_NEXT(s, _next)) {
		if (s->y > sc_x2y(sc, s->x))
			return (0);
		last = s;
	}
	/* last now holds the real last segment */
	if (last == NULL)
		return (1);
	if (last->m > sc->m2)
		return (0);
	if (last->x < sc->d && last->m > sc->m1) {
		y = last->y + (sc->d - last->x) * last->m;
		if (y > sc_x2y(sc, sc->d))
			return (0);
	}
	return (1);
}

static void
gsc_destroy(struct gen_sc *gsc)
{
	struct segment	*s;

	while ((s = LIST_FIRST(gsc)) != NULL) {
		LIST_REMOVE(s, _next);
		free(s);
	}
}

/*
 * return a segment entry starting at x.
 * if gsc has no entry starting at x, a new entry is created at x.
 */
static struct segment *
gsc_getentry(struct gen_sc *gsc, double x)
{
	struct segment	*new, *prev, *s;

	prev = NULL;
	LIST_FOREACH(s, gsc, _next) {
		if (s->x == x)
			return (s);	/* matching entry found */
		else if (s->x < x)
			prev = s;
		else
			break;
	}

	/* we have to create a new entry */
	if ((new = calloc(1, sizeof(struct segment))) == NULL)
		return (NULL);

	new->x = x;
	if (x == INFINITY || s == NULL)
		new->d = 0;
	else if (s->x == INFINITY)
		new->d = INFINITY;
	else
		new->d = s->x - x;
	if (prev == NULL) {
		/* insert the new entry at the head of the list */
		new->y = 0;
		new->m = 0;
		LIST_INSERT_HEAD(gsc, new, _next);
	} else {
		/*
		 * the start point intersects with the segment pointed by
		 * prev.  divide prev into 2 segments
		 */
		if (x == INFINITY) {
			prev->d = INFINITY;
			if (prev->m == 0)
				new->y = prev->y;
			else
				new->y = INFINITY;
		} else {
			prev->d = x - prev->x;
			new->y = prev->d * prev->m + prev->y;
		}
		new->m = prev->m;
		LIST_INSERT_AFTER(prev, new, _next);
	}
	return (new);
}

/* add a segment to a generalized service curve */
static int
gsc_add_seg(struct gen_sc *gsc, double x, double y, double d, double m)
{
	struct segment	*start, *end, *s;
	double		 x2;

	if (d == INFINITY)
		x2 = INFINITY;
	else
		x2 = x + d;
	start = gsc_getentry(gsc, x);
	end = gsc_getentry(gsc, x2);
	if (start == NULL || end == NULL)
		return (-1);

	for (s = start; s != end; s = LIST_NEXT(s, _next)) {
		s->m += m;
		s->y += y + (s->x - x) * m;
	}

	end = gsc_getentry(gsc, INFINITY);
	for (; s != end; s = LIST_NEXT(s, _next)) {
		s->y += m * d;
	}

	return (0);
}

/* get y-projection of a service curve */
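/* i.e. y(x) = m1 * x for x <= d, and d * m1 + (x - d) * m2 afterwards */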
static double
sc_x2y(struct service_curve *sc, double x)
{
	double	y;

	if (x <= (double)sc->d)
		/* y belongs to the 1st segment */
		y = x * (double)sc->m1;
	else
		/* y belongs to the 2nd segment */
		y = (double)sc->d * (double)sc->m1
			+ (x - (double)sc->d) * (double)sc->m2;
	return (y);
}

/*
 * misc utilities
 */
#define	R2S_BUFS	8
#define	RATESTR_MAX	16

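/*
 * rate2str() hands out pointers into a small static ring of buffers, so up
 * to R2S_BUFS results may be referenced at once (e.g. within a single
 * printf call) before older strings are overwritten.
 */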
char *
rate2str(double rate)
{
	char		*buf;
	static char	 r2sbuf[R2S_BUFS][RATESTR_MAX];  /* ring buffer */
	static int	 idx = 0;
	int		 i;
	static const char unit[] = " KMG";

	buf = r2sbuf[idx++];
	if (idx == R2S_BUFS)
		idx = 0;

	for (i = 0; rate >= 1000 && i <= 3; i++)
		rate /= 1000;

	if ((int)(rate * 100) % 100)
		snprintf(buf, RATESTR_MAX, "%.2f%cb", rate, unit[i]);
	else
		snprintf(buf, RATESTR_MAX, "%d%cb", (int)rate, unit[i]);

	return (buf);
}

u_int32_t
getifspeed(char *ifname)
{
#ifdef __OpenBSD__
	int		s;
	struct ifreq	ifr;
	struct if_data	ifrdat;

	if ((s = socket(AF_INET, SOCK_DGRAM, 0)) < 0)
		err(1, "socket");
	bzero(&ifr, sizeof(ifr));
	if (strlcpy(ifr.ifr_name, ifname, sizeof(ifr.ifr_name)) >=
	    sizeof(ifr.ifr_name))
		errx(1, "getifspeed: strlcpy");
	ifr.ifr_data = (caddr_t)&ifrdat;
	if (ioctl(s, SIOCGIFDATA, (caddr_t)&ifr) == -1)
		err(1, "SIOCGIFDATA");
	if (shutdown(s, SHUT_RDWR) == -1)
		err(1, "shutdown");
	if (close(s))
		err(1, "close");
	return ((u_int32_t)ifrdat.ifi_baudrate);
#else
	int			 s;
	struct ifdatareq	 ifdr;
	struct if_data		*ifrdat;

	if ((s = socket(AF_INET, SOCK_DGRAM, 0)) < 0)
		err(1, "getifspeed: socket");
	memset(&ifdr, 0, sizeof(ifdr));
	if (strlcpy(ifdr.ifdr_name, ifname, sizeof(ifdr.ifdr_name)) >=
	    sizeof(ifdr.ifdr_name))
		errx(1, "getifspeed: strlcpy");
	if (ioctl(s, SIOCGIFDATA, &ifdr) == -1)
		err(1, "getifspeed: SIOCGIFDATA");
	ifrdat = &ifdr.ifdr_data;
	if (shutdown(s, SHUT_RDWR) == -1)
		err(1, "getifspeed: shutdown");
	if (close(s) == -1)
		err(1, "getifspeed: close");
	return ((u_int32_t)ifrdat->ifi_baudrate);
#endif
}

u_long
getifmtu(char *ifname)
{
	int		s;
	struct ifreq	ifr;

	if ((s = socket(AF_INET, SOCK_DGRAM, 0)) < 0)
		err(1, "socket");
	bzero(&ifr, sizeof(ifr));
	if (strlcpy(ifr.ifr_name, ifname, sizeof(ifr.ifr_name)) >=
	    sizeof(ifr.ifr_name))
		errx(1, "getifmtu: strlcpy");
	if (ioctl(s, SIOCGIFMTU, (caddr_t)&ifr) == -1)
		err(1, "SIOCGIFMTU");
	if (shutdown(s, SHUT_RDWR) == -1)
		err(1, "shutdown");
	if (close(s))
		err(1, "close");
	if (ifr.ifr_mtu > 0)
		return (ifr.ifr_mtu);
	else {
		warnx("could not get mtu for %s, assuming 1500", ifname);
		return (1500);
	}
}

int
eval_queue_opts(struct pf_altq *pa, struct node_queue_opt *opts,
    u_int32_t ref_bw)
{
	int	errors = 0;

	switch (pa->scheduler) {
	case ALTQT_CBQ:
		pa->pq_u.cbq_opts = opts->data.cbq_opts;
		break;
	case ALTQT_PRIQ:
		pa->pq_u.priq_opts = opts->data.priq_opts;
		break;
	case ALTQT_HFSC:
		pa->pq_u.hfsc_opts.flags = opts->data.hfsc_opts.flags;
		if (opts->data.hfsc_opts.linkshare.used) {
			pa->pq_u.hfsc_opts.lssc_m1 =
			    eval_bwspec(&opts->data.hfsc_opts.linkshare.m1,
			    ref_bw);
			pa->pq_u.hfsc_opts.lssc_m2 =
			    eval_bwspec(&opts->data.hfsc_opts.linkshare.m2,
			    ref_bw);
			pa->pq_u.hfsc_opts.lssc_d =
			    opts->data.hfsc_opts.linkshare.d;
		}
		if (opts->data.hfsc_opts.realtime.used) {
			pa->pq_u.hfsc_opts.rtsc_m1 =
			    eval_bwspec(&opts->data.hfsc_opts.realtime.m1,
			    ref_bw);
			pa->pq_u.hfsc_opts.rtsc_m2 =
			    eval_bwspec(&opts->data.hfsc_opts.realtime.m2,
			    ref_bw);
			pa->pq_u.hfsc_opts.rtsc_d =
			    opts->data.hfsc_opts.realtime.d;
		}
		if (opts->data.hfsc_opts.upperlimit.used) {
			pa->pq_u.hfsc_opts.ulsc_m1 =
			    eval_bwspec(&opts->data.hfsc_opts.upperlimit.m1,
			    ref_bw);
			pa->pq_u.hfsc_opts.ulsc_m2 =
			    eval_bwspec(&opts->data.hfsc_opts.upperlimit.m2,
			    ref_bw);
			pa->pq_u.hfsc_opts.ulsc_d =
			    opts->data.hfsc_opts.upperlimit.d;
		}
		break;
	default:
		warnx("eval_queue_opts: unknown scheduler type %u",
		    opts->qtype);
		errors++;
		break;
	}

	return (errors);
}

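/*
 * An absolute bandwidth wins over a percentage; a percentage is taken
 * relative to ref_bw (the parent or interface bandwidth, depending on the
 * caller); 0 means "unspecified" and lets callers fall back to a default.
 */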
u_int32_t
eval_bwspec(struct node_queue_bw *bw, u_int32_t ref_bw)
{
	if (bw->bw_absolute > 0)
		return (bw->bw_absolute);

	if (bw->bw_percent > 0)
		return (ref_bw / 100 * bw->bw_percent);

	return (0);
}

void
print_hfsc_sc(const char *scname, u_int m1, u_int d, u_int m2,
    const struct node_hfsc_sc *sc)
{
	printf(" %s", scname);

	if (d != 0) {
		printf("(");
		if (sc != NULL && sc->m1.bw_percent > 0)
			printf("%u%%", sc->m1.bw_percent);
		else
			printf("%s", rate2str((double)m1));
		printf(" %u", d);
	}

	if (sc != NULL && sc->m2.bw_percent > 0)
		printf(" %u%%", sc->m2.bw_percent);
	else
		printf(" %s", rate2str((double)m2));

	if (d != 0)
		printf(")");
}